aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2012-11-28 11:23:07 -0500
committerPekka Enberg <penberg@kernel.org>2012-12-11 05:14:27 -0500
commit45530c4474d258b822e2639c786606d8257aad8b (patch)
tree87b6569f777987037c490b9660826a02e17e228d /mm
parent3c58346525d82625e68e24f071804c2dc057b6f4 (diff)
mm, sl[au]b: create common functions for boot slab creation
Use a special function to create kmalloc caches and use that function in SLAB and SLUB.

Acked-by: Joonsoo Kim <js1304@gmail.com>
Reviewed-by: Glauber Costa <glommer@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.c48
-rw-r--r--mm/slab.h5
-rw-r--r--mm/slab_common.c36
-rw-r--r--mm/slub.c37
4 files changed, 60 insertions, 66 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c7ea5234c4e9..e351acea6026 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1659,23 +1659,13 @@ void __init kmem_cache_init(void)
1659 * bug. 1659 * bug.
1660 */ 1660 */
1661 1661
1662 sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1662 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
1663 sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name; 1663 sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
1664 sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size; 1664
1665 sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size; 1665 if (INDEX_AC != INDEX_L3)
1666 sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN; 1666 sizes[INDEX_L3].cs_cachep =
1667 __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); 1667 create_kmalloc_cache(names[INDEX_L3].name,
1668 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); 1668 sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
1669
1670 if (INDEX_AC != INDEX_L3) {
1671 sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
1672 sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
1673 sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
1674 sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
1675 sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
1676 __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
1677 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
1678 }
1679 1669
1680 slab_early_init = 0; 1670 slab_early_init = 0;
1681 1671
@@ -1687,24 +1677,14 @@ void __init kmem_cache_init(void)
1687 * Note for systems short on memory removing the alignment will 1677 * Note for systems short on memory removing the alignment will
1688 * allow tighter packing of the smaller caches. 1678 * allow tighter packing of the smaller caches.
1689 */ 1679 */
1690 if (!sizes->cs_cachep) { 1680 if (!sizes->cs_cachep)
1691 sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1681 sizes->cs_cachep = create_kmalloc_cache(names->name,
1692 sizes->cs_cachep->name = names->name; 1682 sizes->cs_size, ARCH_KMALLOC_FLAGS);
1693 sizes->cs_cachep->size = sizes->cs_size; 1683
1694 sizes->cs_cachep->object_size = sizes->cs_size;
1695 sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
1696 __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
1697 list_add(&sizes->cs_cachep->list, &slab_caches);
1698 }
1699#ifdef CONFIG_ZONE_DMA 1684#ifdef CONFIG_ZONE_DMA
1700 sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1685 sizes->cs_dmacachep = create_kmalloc_cache(
1701 sizes->cs_dmacachep->name = names->name_dma; 1686 names->name_dma, sizes->cs_size,
1702 sizes->cs_dmacachep->size = sizes->cs_size; 1687 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
1703 sizes->cs_dmacachep->object_size = sizes->cs_size;
1704 sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
1705 __kmem_cache_create(sizes->cs_dmacachep,
1706 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
1707 list_add(&sizes->cs_dmacachep->list, &slab_caches);
1708#endif 1688#endif
1709 sizes++; 1689 sizes++;
1710 names++; 1690 names++;
diff --git a/mm/slab.h b/mm/slab.h
index 66a62d3536c6..492eafa0b538 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -35,6 +35,11 @@ extern struct kmem_cache *kmem_cache;
35/* Functions provided by the slab allocators */ 35/* Functions provided by the slab allocators */
36extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); 36extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
37 37
38extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
39 unsigned long flags);
40extern void create_boot_cache(struct kmem_cache *, const char *name,
41 size_t size, unsigned long flags);
42
38#ifdef CONFIG_SLUB 43#ifdef CONFIG_SLUB
39struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 44struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
40 size_t align, unsigned long flags, void (*ctor)(void *)); 45 size_t align, unsigned long flags, void (*ctor)(void *));
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b705be7faa48..497b45c25bae 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,6 +202,42 @@ int slab_is_available(void)
202 return slab_state >= UP; 202 return slab_state >= UP;
203} 203}
204 204
205#ifndef CONFIG_SLOB
206/* Create a cache during boot when no slab services are available yet */
207void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
208 unsigned long flags)
209{
210 int err;
211
212 s->name = name;
213 s->size = s->object_size = size;
214 s->align = ARCH_KMALLOC_MINALIGN;
215 err = __kmem_cache_create(s, flags);
216
217 if (err)
218 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
219 name, size, err);
220
221 s->refcount = -1; /* Exempt from merging for now */
222}
223
224struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
225 unsigned long flags)
226{
227 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
228
229 if (!s)
230 panic("Out of memory when creating slab %s\n", name);
231
232 create_boot_cache(s, name, size, flags);
233 list_add(&s->list, &slab_caches);
234 s->refcount = 1;
235 return s;
236}
237
238#endif /* !CONFIG_SLOB */
239
240
205#ifdef CONFIG_SLABINFO 241#ifdef CONFIG_SLABINFO
206static void print_slabinfo_header(struct seq_file *m) 242static void print_slabinfo_header(struct seq_file *m)
207{ 243{
diff --git a/mm/slub.c b/mm/slub.c
index 33576b0cfc41..1be172c157c3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3245,32 +3245,6 @@ static int __init setup_slub_nomerge(char *str)
3245 3245
3246__setup("slub_nomerge", setup_slub_nomerge); 3246__setup("slub_nomerge", setup_slub_nomerge);
3247 3247
3248static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3249 int size, unsigned int flags)
3250{
3251 struct kmem_cache *s;
3252
3253 s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
3254
3255 s->name = name;
3256 s->size = s->object_size = size;
3257 s->align = ARCH_KMALLOC_MINALIGN;
3258
3259 /*
3260 * This function is called with IRQs disabled during early-boot on
3261 * single CPU so there's no need to take slab_mutex here.
3262 */
3263 if (kmem_cache_open(s, flags))
3264 goto panic;
3265
3266 list_add(&s->list, &slab_caches);
3267 return s;
3268
3269panic:
3270 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
3271 return NULL;
3272}
3273
3274/* 3248/*
3275 * Conversion table for small slabs sizes / 8 to the index in the 3249 * Conversion table for small slabs sizes / 8 to the index in the
3276 * kmalloc array. This is necessary for slabs < 192 since we have non power 3250 * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -3948,6 +3922,10 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3948 if (err) 3922 if (err)
3949 return err; 3923 return err;
3950 3924
3925 /* Mutex is not taken during early boot */
3926 if (slab_state <= UP)
3927 return 0;
3928
3951 mutex_unlock(&slab_mutex); 3929 mutex_unlock(&slab_mutex);
3952 err = sysfs_slab_add(s); 3930 err = sysfs_slab_add(s);
3953 mutex_lock(&slab_mutex); 3931 mutex_lock(&slab_mutex);
@@ -5249,13 +5227,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
5249{ 5227{
5250 int err; 5228 int err;
5251 const char *name; 5229 const char *name;
5252 int unmergeable; 5230 int unmergeable = slab_unmergeable(s);
5253
5254 if (slab_state < FULL)
5255 /* Defer until later */
5256 return 0;
5257 5231
5258 unmergeable = slab_unmergeable(s);
5259 if (unmergeable) { 5232 if (unmergeable) {
5260 /* 5233 /*
5261 * Slabcache can never be merged so we can use the name proper. 5234 * Slabcache can never be merged so we can use the name proper.