path: root/mm/slub.c
author		Christoph Lameter <cl@linux.com>	2012-09-04 19:18:33 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 05:00:37 -0400
commit		8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce (patch)
tree		a212edb3d0b139b0743ca5ca34c14037a6ada4dc /mm/slub.c
parent		278b1bb1313664d4999a7f7d47a8a8d964862d02 (diff)
mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists
Do the initial settings of the fields in common code. This will allow us to push more processing into common code later and improve readability.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
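For context, the common-code counterpart lives in mm/slab_common.c and is outside this diffstat. The sketch below only illustrates the calling convention implied by the new __kmem_cache_create(s, flags) signature shown in the hunks that follow; locking and error handling are simplified and should not be read as the actual implementation.

/*
 * Illustrative sketch only (not part of this patch): roughly how the
 * common kmem_cache_create() path can now populate the fields once,
 * before handing off to the trimmed-down allocator hook.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		return NULL;

	/* Initial settings of the fields, done once for all allocators. */
	s->object_size = s->size = size;
	s->align = align;
	s->ctor = ctor;
	s->name = kstrdup(name, GFP_KERNEL);
	if (!s->name)
		goto out_free;

	/* Allocator-specific setup now only needs the flags. */
	if (__kmem_cache_create(s, flags))
		goto out_name;

	list_add(&s->list, &slab_caches);
	return s;

out_name:
	kfree(s->name);
out_free:
	kmem_cache_free(kmem_cache, s);
	return NULL;
}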
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	39
1 file changed, 18 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0ad3fffc7d23..d8ee419d5a15 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3115,7 +3108,7 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
 	return -EINVAL;
 }
@@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
@@ -3729,8 +3726,10 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
@@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }
 
-int __kmem_cache_create(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 {
-	return kmem_cache_open(s, name, size, align, flags, ctor);
+	return kmem_cache_open(s, flags);
 }
 
 #ifdef CONFIG_SMP
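Nothing changes for cache users: the refactor only trims the allocator-internal hook, while the public kmem_cache_create() interface keeps its full argument list. A hypothetical caller (the names below are invented for illustration) still looks like:

	struct my_obj { int a; };	/* hypothetical object type */
	struct kmem_cache *my_cache;

	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, SLAB_HWCACHE_ALIGN, NULL);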