-rw-r--r--  include/linux/slab.h   4
-rw-r--r--  mm/Makefile            3
-rw-r--r--  mm/slab.c             24
-rw-r--r--  mm/slab_common.c      68
-rw-r--r--  mm/slob.c              8
-rw-r--r--  mm/slub.c             11
6 files changed, 87 insertions, 31 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0dd2dfa7beca..0cb7c7eb0416 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -130,6 +130,10 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 
+/* Slab internal function */
+struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *));
 /*
  * Please use this macro to create slab caches. Simply specify the
  * name of the structure and maybe some flags that are listed above.
diff --git a/mm/Makefile b/mm/Makefile
index a156285ce88d..ae370783612d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -16,7 +16,8 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			   page_isolation.o mm_init.o mmu_context.o percpu.o \
-			   compaction.o $(mmu-y)
+			   compaction.o slab_common.o $(mmu-y)
+
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
diff --git a/mm/slab.c b/mm/slab.c
index 105f188d14a3..10c821e492bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1558,7 +1558,7 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1566,7 +1566,7 @@ void __init kmem_cache_init(void)
 
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep =
-			kmem_cache_create(names[INDEX_L3].name,
+			__kmem_cache_create(names[INDEX_L3].name,
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1584,14 +1584,14 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = kmem_cache_create(names->name,
+			sizes->cs_cachep = __kmem_cache_create(names->name,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = kmem_cache_create(
+		sizes->cs_dmacachep = __kmem_cache_create(
 					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -2220,7 +2220,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 }
 
 /**
- * kmem_cache_create - Create a cache.
+ * __kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
  * @align: The required alignment for the objects.
@@ -2247,7 +2247,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 struct kmem_cache *
-kmem_cache_create (const char *name, size_t size, size_t align,
+__kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
@@ -2388,7 +2388,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/* Get cache's description obj. */
 	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
-		goto oops;
+		return NULL;
 
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 	cachep->object_size = size;
@@ -2445,8 +2445,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 			printk(KERN_ERR
 			       "kmem_cache_create: couldn't create cache %s.\n", name);
 			kmem_cache_free(&cache_cache, cachep);
-			cachep = NULL;
-			goto oops;
+			return NULL;
 		}
 		slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
 				  + sizeof(struct slab), align);
@@ -2504,8 +2503,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
-		cachep = NULL;
-		goto oops;
+		return NULL;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2521,16 +2519,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->list, &cache_chain);
 oops:
-	if (!cachep && (flags & SLAB_PANIC))
-		panic("kmem_cache_create(): failed to create slab `%s'\n",
-				name);
 	if (slab_is_available()) {
 		mutex_unlock(&cache_chain_mutex);
 		put_online_cpus();
 	}
 	return cachep;
 }
-EXPORT_SYMBOL(kmem_cache_create);
 
 #if DEBUG
 static void check_irq_off(void)
diff --git a/mm/slab_common.c b/mm/slab_common.c
new file mode 100644
index 000000000000..80412beb67cc
--- /dev/null
+++ b/mm/slab_common.c
@@ -0,0 +1,68 @@
+/*
+ * Slab allocator functions that are independent of the allocator strategy
+ *
+ * (C) 2012 Christoph Lameter <cl@linux.com>
+ */
+#include <linux/slab.h>
+
+#include <linux/mm.h>
+#include <linux/poison.h>
+#include <linux/interrupt.h>
+#include <linux/memory.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+/*
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline.  This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
+		unsigned long flags, void (*ctor)(void *))
+{
+	struct kmem_cache *s = NULL;
+
+#ifdef CONFIG_DEBUG_VM
+	if (!name || in_interrupt() || size < sizeof(void *) ||
+		size > KMALLOC_MAX_SIZE) {
+		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
+			" failed\n", name);
+		goto out;
+	}
+#endif
+
+	s = __kmem_cache_create(name, size, align, flags, ctor);
+
+#ifdef CONFIG_DEBUG_VM
+out:
+#endif
+	if (!s && (flags & SLAB_PANIC))
+		panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+
+	return s;
+}
+EXPORT_SYMBOL(kmem_cache_create);
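
A minimal caller sketch (not part of this patch) illustrating the common entry point added above: struct my_object, my_object_ctor() and my_cache_init() are hypothetical names used only for illustration. With SLAB_PANIC, the failure handling now lives in this common wrapper rather than in each allocator, so callers relying on panic-on-failure behave the same under slab, slob and slub.

/*
 * Hypothetical caller sketch, not part of this patch.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_object {
	int id;
	struct list_head link;
};

static struct kmem_cache *my_object_cache;

/* The constructor runs when the cache allocates fresh pages of objects. */
static void my_object_ctor(void *obj)
{
	struct my_object *o = obj;

	o->id = 0;
	INIT_LIST_HEAD(&o->link);
}

static int __init my_cache_init(void)
{
	/*
	 * With SLAB_PANIC set, the common kmem_cache_create() wrapper
	 * panics on failure, so no NULL check is needed here.
	 */
	my_object_cache = kmem_cache_create("my_object",
					    sizeof(struct my_object), 0,
					    SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					    my_object_ctor);
	return 0;
}
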
diff --git a/mm/slob.c b/mm/slob.c
index 95d1c7dd88e0..d63923d549ec 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -506,7 +506,7 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *c;
@@ -529,13 +529,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
-	} else if (flags & SLAB_PANIC)
-		panic("Cannot create slab cache %s\n", name);
 
-	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+	}
 	return c;
 }
-EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 79fe9c6b93cf..6551cc9a51f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3920,15 +3920,12 @@ static struct kmem_cache *find_mergeable(size_t size,
 	return NULL;
 }
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 	char *n;
 
-	if (WARN_ON(!name))
-		return NULL;
-
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
@@ -3972,14 +3969,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	kfree(n);
 err:
 	up_write(&slub_lock);
-
-	if (flags & SLAB_PANIC)
-		panic("Cannot create slabcache %s\n", name);
-	else
-		s = NULL;
 	return s;
 }
-EXPORT_SYMBOL(kmem_cache_create);
 
 #ifdef CONFIG_SMP
 /*