author	Glauber Costa <glommer@parallels.com>	2012-10-17 07:36:51 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-10-31 03:13:01 -0400
commit	d8843922fba49e887874aa1f9e748d620c5092af (patch)
tree	9992cb8d37d31368c06c7de3bd6edbe2a8a8a412
parent	8cf9864b1382851d90c7c505f8441c8928f1469e (diff)
slab: Ignore internal flags in cache creation
Some flags are used internally by the allocators for management purposes. One
example of that is the CFLGS_OFF_SLAB flag that slab uses to mark that the
metadata for that cache is stored outside of the slab.

No cache should ever pass those as creation flags. We can just ignore this
bit if it happens to be passed (such as when duplicating a cache in the kmem
memcg patches).

Because such flags can vary from allocator to allocator, we allow each
allocator to make its own decision on that, defining CACHE_CREATE_MASK with
all flags that are valid at creation time. Allocators that don't have any
specific flag requirement should define that to mean all flags. Common code
will mask out all flags not belonging to that set.

Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
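[ Editor's illustration: a minimal, self-contained C sketch of the masking
  idea described above. The flag values and the my_cache_create()/
  MY_CREATE_MASK names are hypothetical stand-ins, not kernel API; the real
  code masks with CACHE_CREATE_MASK in mm/slab_common.c, as the diff below
  shows. ]

#include <stdio.h>

/* Hypothetical creation-time flags, mirroring the kernel's scheme. */
#define FLAG_HWCACHE_ALIGN	0x0001u	/* legal at creation time */
#define FLAG_RECLAIM_ACCOUNT	0x0002u	/* legal at creation time */
#define FLAG_OFF_SLAB		0x8000u	/* internal-only, like CFLGS_OFF_SLAB */

/* Per-allocator whitelist of flags valid at creation time. */
#define MY_CREATE_MASK	(FLAG_HWCACHE_ALIGN | FLAG_RECLAIM_ACCOUNT)

/*
 * Common code sanitizes rather than rejects: internal bits such as
 * FLAG_OFF_SLAB are silently dropped instead of triggering a BUG().
 */
static unsigned int my_cache_create(unsigned int flags)
{
	return flags & MY_CREATE_MASK;
}

int main(void)
{
	unsigned int flags = FLAG_HWCACHE_ALIGN | FLAG_OFF_SLAB;

	printf("passed 0x%04x, sanitized 0x%04x\n",
	       flags, my_cache_create(flags));
	return 0;
}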
-rw-r--r--	mm/slab.c	22
-rw-r--r--	mm/slab.h	25
-rw-r--r--	mm/slab_common.c	7
-rw-r--r--	mm/slub.c	3
4 files changed, 32 insertions(+), 25 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1f7fd5f51f87..6ebb9515a3e9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -162,23 +162,6 @@
  */
 static bool pfmemalloc_active __read_mostly;
 
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
 /*
  * kmem_bufctl_t:
  *
@@ -2378,11 +2361,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	/*
-	 * Always checks flags, a caller might be expecting debug support which
-	 * isn't available.
-	 */
-	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
diff --git a/mm/slab.h b/mm/slab.h
index 5a43c2f13621..66a62d3536c6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -45,6 +45,31 @@ static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t siz
 #endif
 
 
+/* Legal flag mask for kmem_cache_create(), for various configurations */
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+
+#if defined(CONFIG_DEBUG_SLAB)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#elif defined(CONFIG_SLUB_DEBUG)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+			  SLAB_TRACE | SLAB_DEBUG_FREE)
+#else
+#define SLAB_DEBUG_FLAGS (0)
+#endif
+
+#if defined(CONFIG_SLAB)
+#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
+			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+#elif defined(CONFIG_SLUB)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+			  SLAB_TEMPORARY | SLAB_NOTRACK)
+#else
+#define SLAB_CACHE_FLAGS (0)
+#endif
+
+#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
+
 int __kmem_cache_shutdown(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5fb753da6cf0..b705be7faa48 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -109,6 +109,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	if (!kmem_cache_sanity_check(name, size) == 0)
 		goto out_locked;
 
+	/*
+	 * Some allocators will constraint the set of valid flags to a subset
+	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
+	 * case, and we'll just provide them with a sanitized version of the
+	 * passed flags.
+	 */
+	flags &= CACHE_CREATE_MASK;
 
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
diff --git a/mm/slub.c b/mm/slub.c
index deee7c754a7d..b2ada3db4225 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -112,9 +112,6 @@
  * the fast path and disables lockless freelists.
  */
 
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-				SLAB_TRACE | SLAB_DEBUG_FREE)
-
 static inline int kmem_cache_debug(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_DEBUG