aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slob.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2012-09-04 20:20:34 -0400
committerPekka Enberg <penberg@kernel.org>2012-09-05 05:00:36 -0400
commit278b1bb1313664d4999a7f7d47a8a8d964862d02 (patch)
tree65e05bc30338a24fd4afd4c4e8b49b8d3e002218 /mm/slob.c
parent96d17b7be0a9849d381442030886211dbb2a7061 (diff)
mm/sl[aou]b: Move kmem_cache allocations into common code
Shift the allocations to common code. That way the allocation and freeing of the kmem_cache structures is handled by common code.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 42
1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 50f605322700..9b0cee1e8475 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -508,34 +508,26 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *c;
-
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
-			c->align = ARCH_SLAB_MINALIGN;
-		if (c->align < align)
-			c->align = align;
-
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-		c->refcount = 1;
-	}
-	return c;
+	c->name = name;
+	c->size = size;
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
+	}
+	c->flags = flags;
+	c->ctor = ctor;
+	/* ignore alignment unless it's forced */
+	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+	if (c->align < ARCH_SLAB_MINALIGN)
+		c->align = ARCH_SLAB_MINALIGN;
+	if (c->align < align)
+		c->align = align;
+
+	c->refcount = 1;
+	return 0;
 }
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)