path: root/mm/slab.c
author     Christoph Lameter <cl@linux.com>    2012-11-28 11:23:07 -0500
committer  Pekka Enberg <penberg@kernel.org>   2012-12-11 05:14:27 -0500
commit     45530c4474d258b822e2639c786606d8257aad8b (patch)
tree       87b6569f777987037c490b9660826a02e17e228d /mm/slab.c
parent     3c58346525d82625e68e24f071804c2dc057b6f4 (diff)
mm, sl[au]b: create common functions for boot slab creation
Use a special function to create kmalloc caches and use that function in
SLAB and SLUB.

Acked-by: Joonsoo Kim <js1304@gmail.com>
Reviewed-by: Glauber Costa <glommer@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
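The common helper itself is added to mm/slab_common.c, which the diffstat below (limited to mm/slab.c) does not show. As a rough sketch only, reconstructed from the fields and calls the deleted open-coded blocks set up, the boot-cache constructor looks roughly like this; the explicit panic on failure is an assumption inferred from the new call sites no longer passing SLAB_PANIC:

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
						unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	/* Same fields the deleted open-coded blocks filled in by hand. */
	s->name = name;
	s->size = s->object_size = size;
	s->align = ARCH_KMALLOC_MINALIGN;

	/*
	 * Boot caches cannot be allowed to fail: panic here instead of
	 * relying on the SLAB_PANIC flag the old call sites passed
	 * (assumed behaviour, not shown in this diff).
	 */
	if (__kmem_cache_create(s, flags))
		panic("Creation of kmalloc slab %s size=%zd failed\n", name, size);

	list_add(&s->list, &slab_caches);
	return s;
}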
Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c   48
1 file changed, 14 insertions(+), 34 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index c7ea5234c4e9..e351acea6026 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1659,23 +1659,13 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */

-	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
-	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
-	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
-		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
-	}
+	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+	if (INDEX_AC != INDEX_L3)
+		sizes[INDEX_L3].cs_cachep =
+			create_kmalloc_cache(names[INDEX_L3].name,
+				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);

 	slab_early_init = 0;

@@ -1687,24 +1677,14 @@ void __init kmem_cache_init(void)
 	 * Note for systems short on memory removing the alignment will
 	 * allow tighter packing of the smaller caches.
 	 */
-		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			sizes->cs_cachep->name = names->name;
-			sizes->cs_cachep->size = sizes->cs_size;
-			sizes->cs_cachep->object_size = sizes->cs_size;
-			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-			list_add(&sizes->cs_cachep->list, &slab_caches);
-		}
+		if (!sizes->cs_cachep)
+			sizes->cs_cachep = create_kmalloc_cache(names->name,
+					sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes->cs_dmacachep->name = names->name_dma;
-		sizes->cs_dmacachep->size = sizes->cs_size;
-		sizes->cs_dmacachep->object_size = sizes->cs_size;
-		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes->cs_dmacachep,
-			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
-		list_add(&sizes->cs_dmacachep->list, &slab_caches);
+		sizes->cs_dmacachep = create_kmalloc_cache(
+			names->name_dma, sizes->cs_size,
+			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
 		sizes++;
 		names++;