author		Christoph Lameter <cl@linux.com>	2012-09-04 19:38:33 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 05:00:36 -0400
commit		12c3667fb780e20360ad0bde32dfb3591ef609ad (patch)
tree		dbced65da22c85212bf632ff6953c0a94252c3ef /mm/slab.c
parent		8f4c765c22deee766319ae9a1db68325f14816e6 (diff)
mm/sl[aou]b: Get rid of __kmem_cache_destroy
What is done there can be done in __kmem_cache_shutdown.

This affects RCU handling somewhat: on an RCU-deferred free, none of
the slab allocators refer to management structures other than the
kmem_cache structure itself. These other structures can therefore be
freed before the RCU-deferred free of the pages to the page allocator
occurs.
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
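
[Editor's note: the ordering argument above can be illustrated with a
small userspace sketch. This is illustrative only, not kernel code;
all names (toy_cache, deferred_free, toy_cache_shutdown) are made up.
The deferred free path dereferences only the cache structure itself,
so the auxiliary per-cpu and per-node tables can be freed
synchronously during shutdown, before the deferred free runs.]

	/* Illustrative userspace analogue, not kernel code. */
	#include <stdlib.h>

	struct toy_cache {
		void *percpu_array[4];	/* stands in for cachep->array[] */
		void *node_lists[2];	/* stands in for cachep->nodelists[] */
	};

	/*
	 * Deferred free path: touches only the cache struct itself,
	 * never the auxiliary tables, mirroring the RCU-deferred
	 * free of the slab pages.
	 */
	static void deferred_free(struct toy_cache *c)
	{
		free(c);
	}

	static int toy_cache_shutdown(struct toy_cache *c)
	{
		int i;

		/* Safe to free the management structures right away ... */
		for (i = 0; i < 4; i++)
			free(c->percpu_array[i]);
		for (i = 0; i < 2; i++)
			free(c->node_lists[i]);
		/* ... and only then let the deferred path reclaim the cache. */
		deferred_free(c);
		return 0;
	}

	int main(void)
	{
		int i;
		struct toy_cache *c = calloc(1, sizeof(*c));

		if (!c)
			return 1;
		for (i = 0; i < 4; i++)
			c->percpu_array[i] = malloc(16);
		for (i = 0; i < 2; i++)
			c->node_lists[i] = malloc(32);
		return toy_cache_shutdown(c);
	}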
Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 46 +++++++++++++++++++++++-------------------------
 1 file changed, 21 insertions(+), 25 deletions(-)
@@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->refcount = 1;
 
 	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
+		__kmem_cache_shutdown(cachep);
 		return NULL;
 	}
 
@@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __cache_shrink(cachep);
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
+
+	if (rc)
+		return rc;
+
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
 
 /*
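
[Editor's note: for context, a hedged sketch of how the common
kmem_cache_destroy() path is expected to consume the new return value
(simplified from the shape of mm/slab_common.c in this series; exact
details may differ). The early return on a failed __cache_shrink()
means a cache that still holds objects keeps its per-cpu arrays and
list3 structures intact and stays registered.]

	/* Simplified sketch, not the verbatim mm/slab_common.c code. */
	void kmem_cache_destroy(struct kmem_cache *s)
	{
		mutex_lock(&slab_mutex);
		s->refcount--;
		if (!s->refcount) {
			list_del(&s->list);
			if (!__kmem_cache_shutdown(s)) {
				mutex_unlock(&slab_mutex);
				/*
				 * Wait out the deferred page frees before
				 * freeing the kmem_cache structure itself.
				 */
				if (s->flags & SLAB_DESTROY_BY_RCU)
					rcu_barrier();
				kmem_cache_free(kmem_cache, s);
				return;
			}
			/* Shutdown failed: objects remain, keep the cache. */
			list_add(&s->list, &slab_caches);
		}
		mutex_unlock(&slab_mutex);
	}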