author		Pekka Enberg <penberg@kernel.org>	2012-10-03 02:56:37 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-10-03 02:56:37 -0400
commit		f4178cdddd4cb860a17f363fe13264fff03da7f2
tree		5ca8dc6bb09bcb2c4b959b60712d7a3f60c7a43f	/mm/slub.c
parent		023dc70470502f41b285112d4840f35d9075b767
parent		f28510d30c7f03daa290019fbc57ad8277347614
Merge branch 'slab/common-for-cgroups' into slab/for-linus
Fix up a trivial conflict with NUMA_NO_NODE cleanups.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	145
1 file changed, 56 insertions(+), 89 deletions(-)
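Note (not part of the commit): the SLUB-side effect of this merge, visible in the hunks below, is that struct kmem_cache setup moves out of the allocator. kmem_cache_open() now takes only the flags and reads name, sizes, align and ctor from an already-populated structure, __kmem_cache_create()/__kmem_cache_shutdown() become int-returning hooks, and kmem_cache_destroy() disappears from mm/slub.c. The following is a minimal sketch of how a caller in the common slab code is assumed to drive these hooks after the merge; the helper name and exact error handling are illustrative, not taken from this diff.

/* Illustrative sketch only -- not part of this commit. */
static struct kmem_cache *create_cache_sketch(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		return NULL;

	/* The caller now fills in the fields kmem_cache_open() used to set. */
	s->name = name;
	s->object_size = s->size = size;
	s->align = align;
	s->ctor = ctor;

	if (__kmem_cache_create(s, flags)) {
		/* kmem_cache_open() now reports failure as a -errno value. */
		kmem_cache_free(kmem_cache, s);
		return NULL;
	}

	s->refcount = 1;	/* refcount init also moved out of SLUB */
	list_add(&s->list, &slab_caches);
	return s;
}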
diff --git a/mm/slub.c b/mm/slub.c
index 97a49d9a37cd..a0d698467f70 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
-	kfree(s->name);
-	kfree(s);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
 #endif
 
@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
+	if (kmem_cache_debug(s) && page->slab != s) {
+		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
+			" is from %s\n", page->slab->name, s->name);
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	slab_free(s, page, x, _RET_IP_);
 
 	trace_kmem_cache_free(_RET_IP_, x);
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	memset(s, 0, kmem_size);
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
 	else
 		s->cpu_partial = 30;
 
-	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
 #endif
@@ -3121,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
 		goto error;
 
 	if (alloc_kmem_cache_cpus(s))
-		return 1;
+		return 0;
 
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
-	return 0;
+	return -EINVAL;
 }
 
 /*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 						sizeof(long), GFP_ATOMIC);
 	if (!map)
 		return;
-	slab_err(s, page, "%s", text);
+	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
 	get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-				"Objects remaining on kmem_cache_close()");
+			"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
 }
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
+	free_percpu(s->cpu_slab);
 	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		mutex_unlock(&slab_mutex);
-		if (kmem_cache_close(s)) {
-			printk(KERN_ERR "SLUB %s: %s called for cache that "
-				"still has objects.\n", s->name, __func__);
-			dump_stack();
-		}
-		if (s->flags & SLAB_DESTROY_BY_RCU)
-			rcu_barrier();
-		sysfs_slab_remove(s);
-	} else
-		mutex_unlock(&slab_mutex);
+	int rc = kmem_cache_close(s);
+
+	if (!rc)
+		sysfs_slab_remove(s);
+
+	return rc;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *		Kmalloc subsystem
@@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 EXPORT_SYMBOL(kmalloc_caches);
 
-static struct kmem_cache *kmem_cache;
-
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
 	struct kmem_cache *s;
 
-	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
 
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3734,12 +3715,12 @@ void __init kmem_cache_init(void)
 		slub_max_order = 0;
 
 	kmem_size = offsetof(struct kmem_cache, node) +
 				nr_node_ids * sizeof(struct kmem_cache_node *);
 
 	/* Allocate two kmem_caches from the page allocator */
 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
 	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
+	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
 
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
@@ -3758,8 +3740,10 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
 	return NULL;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
-	char *n;
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
-			return NULL;
+			s = NULL;
 		}
-		return s;
 	}
 
-	n = kstrdup(name, GFP_KERNEL);
-	if (!n)
-		return NULL;
+	return s;
+}
 
-	s = kmalloc(kmem_size, GFP_KERNEL);
-	if (s) {
-		if (kmem_cache_open(s, n,
-				size, align, flags, ctor)) {
-			int r;
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+{
+	int err;
+
+	err = kmem_cache_open(s, flags);
+	if (err)
+		return err;
 
-			list_add(&s->list, &slab_caches);
-			mutex_unlock(&slab_mutex);
-			r = sysfs_slab_add(s);
-			mutex_lock(&slab_mutex);
+	mutex_unlock(&slab_mutex);
+	err = sysfs_slab_add(s);
+	mutex_lock(&slab_mutex);
 
-			if (!r)
-				return s;
+	if (err)
+		kmem_cache_close(s);
 
-			list_del(&s->list);
-			kmem_cache_close(s);
-		}
-		kfree(s);
-	}
-	kfree(n);
-	return NULL;
+	return err;
 }
 
 #ifdef CONFIG_SMP
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	return err;
 }
 
-static void kmem_cache_release(struct kobject *kobj)
-{
-	struct kmem_cache *s = to_slab(kobj);
-
-	kfree(s->name);
-	kfree(s);
-}
-
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
-	.release = kmem_cache_release
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)