author    | Christoph Lameter <cl@linux.com>  | 2012-09-04 20:20:34 -0400
committer | Pekka Enberg <penberg@kernel.org> | 2012-09-05 05:00:36 -0400
commit    | 278b1bb1313664d4999a7f7d47a8a8d964862d02
tree      | 65e05bc30338a24fd4afd4c4e8b49b8d3e002218 /mm/slab.c
parent    | 96d17b7be0a9849d381442030886211dbb2a7061
mm/sl[aou]b: Move kmem_cache allocations into common code
Shift the allocations to common code. That way the allocation and
freeing of the kmem_cache structures are handled by common code.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 34
1 file changed, 16 insertions(+), 18 deletions(-)
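
This diff is limited to mm/slab.c, so the common-code side that now owns the struct kmem_cache allocation is not shown. Roughly, the division of labour becomes: the generic kmem_cache_create() allocates (and on failure frees) the struct kmem_cache, while the allocator-specific __kmem_cache_create() only initializes the object it is handed and returns 0 or a negative errno. A simplified sketch of such a caller, with locking, name handling and debug checks omitted (this is an illustration, not the verbatim mm/slab_common.c code):

/*
 * Simplified sketch of the common-code caller implied by this change.
 * Details (slab_mutex locking, name duplication, sanity checks) are
 * omitted; treat this as an illustration, not the upstream code.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
				     size_t align, unsigned long flags,
				     void (*ctor)(void *))
{
	struct kmem_cache *s;
	int err;

	/* Common code allocates the cache descriptor... */
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		return NULL;

	/* ...the allocator only initializes the object it is handed... */
	err = __kmem_cache_create(s, name, size, align, flags, ctor);
	if (err) {
		/* ...and common code frees it again if that fails. */
		kmem_cache_free(kmem_cache, s);
		return NULL;
	}

	list_add(&s->list, &slab_caches);
	return s;
}
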
@@ -1676,7 +1676,8 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1684,8 +1685,8 @@ void __init kmem_cache_init(void)
 
 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1704,7 +1705,8 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			__kmem_cache_create(sizes->cs_cachep, names->name,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1712,7 +1714,8 @@ void __init kmem_cache_init(void)
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes->cs_dmacachep,
 					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -2356,13 +2359,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
+int
+__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
 
 #if DEBUG
 #if FORCED_DEBUG
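
With this change the allocator no longer returns a pointer to a freshly allocated cache. The shared prototype lives in mm/slab.h, which is not part of this mm/slab.c-only diff; it presumably becomes something along these lines:

/*
 * Presumed shared prototype (mm/slab.h is not shown in this diff):
 * the caller passes in a pre-allocated, zeroed kmem_cache and gets back
 * 0 on success or a negative errno such as -E2BIG.
 */
int __kmem_cache_create(struct kmem_cache *cachep, const char *name,
			size_t size, size_t align, unsigned long flags,
			void (*ctor)(void *));
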
@@ -2450,11 +2453,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(kmem_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 	cachep->object_size = size;
 	cachep->align = align;
@@ -2509,8 +2507,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep->num) {
 		printk(KERN_ERR
 			"kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(kmem_cache, cachep);
-		return NULL;
+		return -E2BIG;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
 			  + sizeof(struct slab), align);
@@ -2567,9 +2564,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->name = name;
 	cachep->refcount = 1;
 
-	if (setup_cpu_cache(cachep, gfp)) {
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
 		__kmem_cache_shutdown(cachep);
-		return NULL;
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2582,7 +2580,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	return cachep;
+	return 0;
 }
 
 #if DEBUG