author		Christoph Lameter <cl@linux.com>	2012-07-06 16:25:10 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-07-09 05:13:30 -0400
commit		039363f38bfe5f6281e9eae5e0518b11577d9d50 (patch)
tree		11aa16feccb68b035aa9e9f390a54e57fa2ffd83 /mm/slab_common.c
parent		068ce415bea9e2b96bde76dc1bf6e672a89903ee (diff)
mm, sl[aou]b: Extract common code for kmem_cache_create()
Kmem_cache_create() does a variety of sanity checks but those vary depending on the allocator. Use the strictest tests and put them into a slab_common file. Make the tests conditional on CONFIG_DEBUG_VM.

This patch has the effect of adding sanity checks for SLUB and SLOB under CONFIG_DEBUG_VM and removes the checks in SLAB for !CONFIG_DEBUG_VM.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	68
1 file changed, 68 insertions, 0 deletions
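To make the effect of the consolidated checks concrete, here is a minimal, hypothetical caller (not part of this patch; the module and cache names are made up for illustration). On a SLUB or SLOB kernel built with CONFIG_DEBUG_VM, the request below now fails the integrity check and returns NULL, because the requested object size is smaller than sizeof(void *):

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *tiny_cache;

static int __init tiny_cache_init(void)
{
        /*
         * A 2-byte object is below sizeof(void *), so with CONFIG_DEBUG_VM
         * the common code prints "kmem_cache_create(tiny_cache) integrity
         * check failed" and returns NULL instead of calling the allocator.
         */
        tiny_cache = kmem_cache_create("tiny_cache", 2, 0, 0, NULL);
        return tiny_cache ? 0 : -EINVAL;
}

static void __exit tiny_cache_exit(void)
{
        if (tiny_cache)
                kmem_cache_destroy(tiny_cache);
}

module_init(tiny_cache_init);
module_exit(tiny_cache_exit);
MODULE_LICENSE("GPL");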
diff --git a/mm/slab_common.c b/mm/slab_common.c
new file mode 100644
index 000000000000..80412beb67cc
--- /dev/null
+++ b/mm/slab_common.c
@@ -0,0 +1,68 @@
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
                unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s = NULL;

#ifdef CONFIG_DEBUG_VM
        if (!name || in_interrupt() || size < sizeof(void *) ||
                size > KMALLOC_MAX_SIZE) {
                printk(KERN_ERR "kmem_cache_create(%s) integrity check"
                        " failed\n", name);
                goto out;
        }
#endif

        s = __kmem_cache_create(name, size, align, flags, ctor);

#ifdef CONFIG_DEBUG_VM
out:
#endif
        if (!s && (flags & SLAB_PANIC))
                panic("kmem_cache_create: Failed to create slab '%s'\n", name);

        return s;
}
EXPORT_SYMBOL(kmem_cache_create);
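For reference, a typical consumer of this interface looks roughly like the sketch below. This is a hedged example, not taken from the patch; the struct name, cache name, and field sizes are made up. It creates a cache of fixed-size objects, allocates and frees one object as a smoke test, and destroys the cache on module exit:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>

struct my_record {
        int id;
        char payload[60];
};

static struct kmem_cache *my_record_cache;

static int __init my_record_init(void)
{
        struct my_record *r;

        /* Align objects to a hardware cacheline; no constructor needed. */
        my_record_cache = kmem_cache_create("my_record",
                                            sizeof(struct my_record), 0,
                                            SLAB_HWCACHE_ALIGN, NULL);
        if (!my_record_cache)
                return -ENOMEM;

        /* Objects come from the cache and go back with kmem_cache_free(). */
        r = kmem_cache_alloc(my_record_cache, GFP_KERNEL);
        if (r)
                kmem_cache_free(my_record_cache, r);

        return 0;
}

static void __exit my_record_exit(void)
{
        kmem_cache_destroy(my_record_cache);
}

module_init(my_record_init);
module_exit(my_record_exit);
MODULE_LICENSE("GPL");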