author      Christoph Lameter <cl@linux.com>    2012-09-04 19:18:33 -0400
committer   Pekka Enberg <penberg@kernel.org>   2012-09-05 05:00:37 -0400
commit      8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce (patch)
tree        a212edb3d0b139b0743ca5ca34c14037a6ada4dc /mm/slab.c
parent      278b1bb1313664d4999a7f7d47a8a8d964862d02 (diff)
mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists
Do the initial settings of the fields in common code. This will allow us
to push more processing into common code later and improve readability.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
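As a reading aid, here is a small, self-contained userspace sketch of the calling convention this commit introduces: common code fills in name, size, object_size and align on the cache first, and __kmem_cache_create() is then called with only the cache pointer and the flags. The demo_* struct and functions below are invented stand-ins, not the real slab API; they only mirror the shape of the change shown in the diff that follows.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for struct kmem_cache. */
struct demo_kmem_cache {
        const char    *name;
        size_t         size;        /* allocated object size */
        size_t         object_size; /* original object size */
        size_t         align;
        unsigned long  flags;
};

/* Old shape (before the patch): everything arrives as parameters. */
static int demo_cache_create_old(struct demo_kmem_cache *c, const char *name,
                                 size_t size, size_t align,
                                 unsigned long flags, void (*ctor)(void *))
{
        (void)ctor;                 /* ctor handling omitted in this sketch */
        c->name = name;
        c->size = c->object_size = size;
        c->align = align;
        c->flags = flags;
        return 0;
}

/* New shape (after the patch): fields were pre-set by common code. */
static int demo_cache_create_new(struct demo_kmem_cache *c, unsigned long flags)
{
        c->flags = flags;           /* only the flags still come in as an argument */
        return 0;
}

int main(void)
{
        struct demo_kmem_cache old_style = { 0 };
        struct demo_kmem_cache new_style = { 0 };

        /* Old convention: a long parameter list per call site. */
        demo_cache_create_old(&old_style, "demo-cache-old", 128, 8, 0x1UL, NULL);

        /* New convention: common code does the initial field settings... */
        new_style.name = "demo-cache-new";
        new_style.size = new_style.object_size = 128;
        new_style.align = 8;

        /* ...so the allocator-specific hook needs far fewer parameters. */
        demo_cache_create_new(&new_style, 0x1UL /* stand-in for SLAB_PANIC etc. */);

        printf("%s: size=%zu object_size=%zu align=%zu flags=%#lx\n",
               new_style.name, new_style.size, new_style.object_size,
               new_style.align, new_style.flags);
        return 0;
}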
Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c   73
1 file changed, 33 insertions, 40 deletions
diff --git a/mm/slab.c b/mm/slab.c
index abc83334e5fb..f1f6d54e129a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void)
          */
 
         sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-        __kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
-                                        sizes[INDEX_AC].cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
-
+        sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+        sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+        __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
         list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
+
         if (INDEX_AC != INDEX_L3) {
                 sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-                __kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
-                        sizes[INDEX_L3].cs_size,
-                        ARCH_KMALLOC_MINALIGN,
-                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                        NULL);
+                sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+                sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
                 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
         }
 
@@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void)
          */
         if (!sizes->cs_cachep) {
                 sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-                __kmem_cache_create(sizes->cs_cachep, names->name,
-                                        sizes->cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
+                sizes->cs_cachep->name = names->name;
+                sizes->cs_cachep->size = sizes->cs_size;
+                sizes->cs_cachep->object_size = sizes->cs_size;
+                sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
                 list_add(&sizes->cs_cachep->list, &slab_caches);
         }
 #ifdef CONFIG_ZONE_DMA
         sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        sizes->cs_dmacachep->name = names->name_dma;
+        sizes->cs_dmacachep->size = sizes->cs_size;
+        sizes->cs_dmacachep->object_size = sizes->cs_size;
+        sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
         __kmem_cache_create(sizes->cs_dmacachep,
-                        names->name_dma,
-                        sizes->cs_size,
-                        ARCH_KMALLOC_MINALIGN,
-                        ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-                                SLAB_PANIC,
-                        NULL);
+                       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
         list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
         sizes++;
@@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 int
-__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
-        unsigned long flags, void (*ctor)(void *))
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
         size_t left_over, slab_size, ralign;
         gfp_t gfp;
         int err;
+        size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
                 ralign = ARCH_SLAB_MINALIGN;
         }
         /* 3) caller mandated alignment */
-        if (ralign < align) {
-                ralign = align;
+        if (ralign < cachep->align) {
+                ralign = cachep->align;
         }
         /* disable debug if necessary */
         if (ralign > __alignof__(unsigned long long))
@@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
         /*
          * 4) Store it.
          */
-        align = ralign;
+        cachep->align = ralign;
 
         if (slab_is_available())
                 gfp = GFP_KERNEL;
@@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
                 gfp = GFP_NOWAIT;
 
         cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-        cachep->object_size = size;
-        cachep->align = align;
 #if DEBUG
 
         /*
@@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
          */
                 flags |= CFLGS_OFF_SLAB;
 
-        size = ALIGN(size, align);
+        size = ALIGN(size, cachep->align);
 
-        left_over = calculate_slab_order(cachep, size, align, flags);
+        left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
-        if (!cachep->num) {
-                printk(KERN_ERR
-                       "kmem_cache_create: couldn't create cache %s.\n", name);
+        if (!cachep->num)
                 return -E2BIG;
-        }
+
         slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-                          + sizeof(struct slab), align);
+                          + sizeof(struct slab), cachep->align);
 
         /*
          * If the slab has been placed off-slab, and we have enough space then
@@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 
         cachep->colour_off = cache_line_size();
         /* Offset must be a multiple of the alignment. */
-        if (cachep->colour_off < align)
-                cachep->colour_off = align;
+        if (cachep->colour_off < cachep->align)
+                cachep->colour_off = cachep->align;
         cachep->colour = left_over / cachep->colour_off;
         cachep->slab_size = slab_size;
         cachep->flags = flags;
@@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
          */
                 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
         }
-        cachep->ctor = ctor;
-        cachep->name = name;
         cachep->refcount = 1;
 
         err = setup_cpu_cache(cachep, gfp);