Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a9c4472e9204..b344e6707128 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -137,6 +137,7 @@
 
 /* Shouldn't this be in a header file somewhere? */
 #define BYTES_PER_WORD		sizeof(void *)
+#define REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
 #ifndef cache_line_size
 #define cache_line_size()	L1_CACHE_BYTES
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 	if (cachep->flags & SLAB_STORE_USER)
 		return (unsigned long long *)(objp + cachep->buffer_size -
 					      sizeof(unsigned long long) -
-					      BYTES_PER_WORD);
+					      REDZONE_ALIGN);
 	return (unsigned long long *) (objp + cachep->buffer_size -
 				       sizeof(unsigned long long));
 }
@@ -2178,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
+						2 * sizeof(unsigned long long)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
@@ -2219,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/*
-	 * Redzoning and user store require word alignment. Note this will be
-	 * overridden by architecture or caller mandated alignment if either
-	 * is greater than BYTES_PER_WORD.
+	 * Redzoning and user store require word alignment or possibly larger.
+	 * Note this will be overridden by architecture or caller mandated
+	 * alignment if either is greater than BYTES_PER_WORD.
 	 */
-	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-		ralign = __alignof__(unsigned long long);
+	if (flags & SLAB_STORE_USER)
+		ralign = BYTES_PER_WORD;
+
+	if (flags & SLAB_RED_ZONE) {
+		ralign = REDZONE_ALIGN;
+		/* If redzoning, ensure that the second redzone is suitably
+		 * aligned, by adjusting the object size accordingly. */
+		size += REDZONE_ALIGN - 1;
+		size &= ~(REDZONE_ALIGN - 1);
+	}
 
 	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2261,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
-		 * the real object.
+		 * the real object. But if the second red zone needs to be
+		 * aligned to 64 bits, we must allow that much space.
 		 */
-		size += BYTES_PER_WORD;
+		if (flags & SLAB_RED_ZONE)
+			size += REDZONE_ALIGN;
+		else
+			size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
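
Note on the size rounding added above: the two lines "size += REDZONE_ALIGN - 1; size &= ~(REDZONE_ALIGN - 1)" are the usual power-of-two align-up idiom, ensuring the second redzone returned by dbg_redzone2() sits on a 64-bit boundary. Below is a minimal user-space sketch of that arithmetic, not kernel code: the align_up() helper and the sample sizes are made up for illustration, and REDZONE_ALIGN is re-derived with a plain ternary instead of the kernel's max() macro, assuming GCC/Clang's __alignof__ extension.

#include <stdio.h>

#define BYTES_PER_WORD	sizeof(void *)
/* Same idea as the patch's max(BYTES_PER_WORD, __alignof__(unsigned long long)) */
#define REDZONE_ALIGN	(BYTES_PER_WORD > __alignof__(unsigned long long) ? \
			 BYTES_PER_WORD : __alignof__(unsigned long long))

/* Round size up to the next multiple of align; only valid when align is a
 * power of two, which REDZONE_ALIGN always is (word size or 8). */
static size_t align_up(size_t size, size_t align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	size_t sizes[] = { 13, 22, 24, 100 };	/* arbitrary example object sizes */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %3zu -> padded to %3zu (alignment %zu)\n",
		       sizes[i], align_up(sizes[i], REDZONE_ALIGN),
		       (size_t)REDZONE_ALIGN);
	return 0;
}

On typical 64-bit builds both operands of REDZONE_ALIGN are 8, so the rounding is no different from word alignment; the change matters on 32-bit targets where sizeof(void *) is 4 but __alignof__(unsigned long long) is commonly 8, which is the case this patch is addressing.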