Diffstat (limited to 'mm')

 -rw-r--r--  mm/slab.c        | 22
 -rw-r--r--  mm/slab.h        | 25
 -rw-r--r--  mm/slab_common.c |  7
 -rw-r--r--  mm/slub.c        |  3
 4 files changed, 32 insertions, 25 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -162,23 +162,6 @@
  */
 static bool pfmemalloc_active __read_mostly;
 
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
 /*
  * kmem_bufctl_t:
  *
@@ -2378,11 +2361,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	/*
-	 * Always checks flags, a caller might be expecting debug support which
-	 * isn't available.
-	 */
-	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -45,6 +45,31 @@ static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t siz
 #endif
 
 
+/* Legal flag mask for kmem_cache_create(), for various configurations */
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+
+#if defined(CONFIG_DEBUG_SLAB)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#elif defined(CONFIG_SLUB_DEBUG)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+			  SLAB_TRACE | SLAB_DEBUG_FREE)
+#else
+#define SLAB_DEBUG_FLAGS (0)
+#endif
+
+#if defined(CONFIG_SLAB)
+#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
+			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+#elif defined(CONFIG_SLUB)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+			  SLAB_TEMPORARY | SLAB_NOTRACK)
+#else
+#define SLAB_CACHE_FLAGS (0)
+#endif
+
+#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
+
 int __kmem_cache_shutdown(struct kmem_cache *);
 
 struct seq_file;
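
Note: CACHE_CREATE_MASK is simply the union of the three flag groups that apply to the configuration being built, so each allocator ends up exposing exactly the creation flags it supports. Below is a minimal user-space sketch of that composition; the bit values are hypothetical (the real ones live in include/linux/slab.h), and it pretends CONFIG_SLAB and CONFIG_DEBUG_SLAB are selected.

#include <stdio.h>

/* Hypothetical flag bit values, for illustration only. */
#define SLAB_HWCACHE_ALIGN	0x0001UL
#define SLAB_CACHE_DMA		0x0002UL
#define SLAB_PANIC		0x0004UL
#define SLAB_DESTROY_BY_RCU	0x0008UL
#define SLAB_DEBUG_OBJECTS	0x0010UL
#define SLAB_RED_ZONE		0x0020UL
#define SLAB_POISON		0x0040UL
#define SLAB_STORE_USER		0x0080UL
#define SLAB_MEM_SPREAD		0x0100UL
#define SLAB_NOLEAKTRACE	0x0200UL
#define SLAB_RECLAIM_ACCOUNT	0x0400UL
#define SLAB_TEMPORARY		0x0800UL
#define SLAB_NOTRACK		0x1000UL

/* As if CONFIG_SLAB and CONFIG_DEBUG_SLAB were selected. */
#define SLAB_CORE_FLAGS	  (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			   SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)
#define SLAB_DEBUG_FLAGS  (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#define SLAB_CACHE_FLAGS  (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			   SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int main(void)
{
	/* Print each group and the union that kmem_cache_create() will use. */
	printf("core  flags: %#06lx\n", SLAB_CORE_FLAGS);
	printf("debug flags: %#06lx\n", SLAB_DEBUG_FLAGS);
	printf("cache flags: %#06lx\n", SLAB_CACHE_FLAGS);
	printf("create mask: %#06lx\n", CACHE_CREATE_MASK);
	return 0;
}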
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5fb753da6cf0..b705be7faa48 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -109,6 +109,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	if (!kmem_cache_sanity_check(name, size) == 0)
 		goto out_locked;
 
+	/*
+	 * Some allocators will constraint the set of valid flags to a subset
+	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
+	 * case, and we'll just provide them with a sanitized version of the
+	 * passed flags.
+	 */
+	flags &= CACHE_CREATE_MASK;
 
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
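
Note: this hunk is where the behaviour shifts. Instead of SLAB's __kmem_cache_create() rejecting unknown bits with BUG_ON(flags & ~CREATE_MASK) (removed above in mm/slab.c), the common kmem_cache_create() now masks the flags up front, so a bit outside CACHE_CREATE_MASK is silently dropped before any allocator sees it. A small user-space sketch of that sanitization step; the flag values are hypothetical and the helper name sanitize_cache_flags is made up for illustration.

#include <assert.h>
#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define SLAB_HWCACHE_ALIGN	0x0001UL
#define SLAB_PANIC		0x0004UL
#define SLAB_STORE_USER		0x0080UL	/* debug-only flag */

/* Pretend this build has no slab debugging support compiled in. */
#define SLAB_DEBUG_FLAGS	(0)
#define CACHE_CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_DEBUG_FLAGS)

/* Made-up helper mirroring the "flags &= CACHE_CREATE_MASK" step above. */
static unsigned long sanitize_cache_flags(unsigned long flags)
{
	return flags & CACHE_CREATE_MASK;
}

int main(void)
{
	unsigned long flags = SLAB_HWCACHE_ALIGN | SLAB_STORE_USER;

	flags = sanitize_cache_flags(flags);
	assert(!(flags & SLAB_STORE_USER));	/* unsupported bit dropped, no BUG() */
	printf("sanitized flags: %#lx\n", flags);
	return 0;
}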
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -112,9 +112,6 @@
  * the fast path and disables lockless freelists.
  */
 
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-			  SLAB_TRACE | SLAB_DEBUG_FREE)
-
 static inline int kmem_cache_debug(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_DEBUG
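
Note: with its private copy removed, slub.c now picks up SLAB_DEBUG_FLAGS from mm/slab.h (the CONFIG_SLUB_DEBUG branch of the new header), and the kmem_cache_debug() predicate above only needs to test whether any of those debug bits are set in the cache's flags. A rough user-space sketch of that pattern follows; it uses simplified types and hypothetical flag values, not the verbatim kernel function.

/* Hypothetical flag values; the real ones are in include/linux/slab.h. */
#define SLAB_RED_ZONE	0x01UL
#define SLAB_POISON	0x02UL
#define SLAB_STORE_USER	0x04UL
#define SLAB_TRACE	0x08UL
#define SLAB_DEBUG_FREE	0x10UL

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)

struct kmem_cache {
	unsigned long flags;	/* creation flags, after sanitization */
};

/* Debugging is considered active for a cache iff any debug bit is set. */
static inline int kmem_cache_debug(const struct kmem_cache *s)
{
	return (s->flags & SLAB_DEBUG_FLAGS) != 0;
}

int main(void)
{
	struct kmem_cache s = { .flags = SLAB_POISON };

	return kmem_cache_debug(&s) ? 0 : 1;
}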