author    David Woodhouse <dwmw2@infradead.org>    2007-07-04 21:26:44 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-05 18:54:13 -0400
commit    87a927c715789853cc8331d76039a2fd657a832a
tree      c185e20386fd3ec8679d3d52bae822d9963df5da
parent    2bcb1b7de9eeea969a25d5f2b4511195cca9f2a2
Fix slab redzone alignment
Commit b46b8f19c9cd435ecac4d9d12b39d78c137ecd66 fixed a couple of bugs by
switching the redzone to 64 bits. Unfortunately, it neglected to ensure that
the _second_ redzone, after the slab object, is aligned correctly. This caused
illegal instruction faults on sparc32, which for some reason not entirely
clear to me are not trapped and fixed up.

Two things need to be done to fix this:

 - Increase the object size, rounding up to alignof(long long), so that the
   second redzone can be aligned correctly.
 - If SLAB_STORE_USER is set but alignof(long long) == 8, allow a full 64 bits
   of space for the user word at the end of the buffer, even though we may not
   _use_ the whole 64 bits.

This patch should be a no-op on any 64-bit architecture or any 32-bit
architecture where alignof(long long) == 4. Of the others, it's tested on
ppc32 by myself, and a very similar patch was tested on sparc32 by Mark
Fortescue, who reported the new problem.

Also, fix the conditions for FORCED_DEBUG, which hadn't been adjusted to the
new sizes. Again noticed by Mark.

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
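To make the arithmetic concrete, here is a minimal userspace sketch (not the
kernel code) of the two changes described above: rounding the object size up
to REDZONE_ALIGN and reserving a full REDZONE_ALIGN bytes for the user word.
The starting size of 20 bytes is hypothetical, and the kernel's max() macro is
replaced by a plain ternary so the snippet builds on its own.

/* Minimal sketch, not kernel code: the size padding this patch introduces. */
#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD  sizeof(void *)
#define REDZONE_ALIGN   (__alignof__(unsigned long long) > BYTES_PER_WORD ? \
                         __alignof__(unsigned long long) : BYTES_PER_WORD)

int main(void)
{
        size_t size = 20;       /* hypothetical object size */

        /* Round up so the second redzone lands on an
         * alignof(unsigned long long) boundary. */
        size += REDZONE_ALIGN - 1;
        size &= ~(REDZONE_ALIGN - 1);

        /* With SLAB_RED_ZONE and SLAB_STORE_USER, reserve a full
         * REDZONE_ALIGN bytes for the user word even if only
         * BYTES_PER_WORD of it are actually used. */
        size += REDZONE_ALIGN;

        printf("padded size: %zu\n", size);
        return 0;
}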
 mm/slab.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a9c4472e9204..b344e6707128 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -137,6 +137,7 @@
 
 /* Shouldn't this be in a header file somewhere? */
 #define BYTES_PER_WORD          sizeof(void *)
+#define REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
 #ifndef cache_line_size
 #define cache_line_size()       L1_CACHE_BYTES
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 	if (cachep->flags & SLAB_STORE_USER)
 		return (unsigned long long *)(objp + cachep->buffer_size -
 					      sizeof(unsigned long long) -
-					      BYTES_PER_WORD);
+					      REDZONE_ALIGN);
 	return (unsigned long long *) (objp + cachep->buffer_size -
 				       sizeof(unsigned long long));
 }
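As a rough illustration of where dbg_redzone2() now points, the following
standalone sketch computes the second-redzone offset with and without
SLAB_STORE_USER. Assumptions: REDZONE_ALIGN is 8, as on the affected 32-bit
machines, and the padded buffer size of 64 bytes is invented.

/* Illustrative only, not kernel code. */
#include <stdio.h>

#define REDZONE_ALIGN 8UL       /* assumed: alignof(unsigned long long) == 8 */

int main(void)
{
        unsigned long buffer_size = 64; /* hypothetical padded buffer size */

        /* SLAB_STORE_USER set: the last REDZONE_ALIGN bytes hold the caller
         * address, so the 64-bit redzone sits just before that slot. */
        unsigned long rz2_user = buffer_size - sizeof(unsigned long long) - REDZONE_ALIGN;

        /* SLAB_STORE_USER clear: the redzone is simply the last 8 bytes. */
        unsigned long rz2_plain = buffer_size - sizeof(unsigned long long);

        printf("redzone2 at offset %lu (user store) / %lu (no user store)\n",
               rz2_user, rz2_plain);
        return 0;
}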
@@ -2178,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
+						2 * sizeof(unsigned long long)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
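For objects of 4096 bytes and larger, the FORCED_DEBUG test above only forces
redzoning when the debug overhead does not push the object into the next
power-of-two size class. The sketch below mimics that check in userspace;
fls() is emulated with __builtin_clz(), REDZONE_ALIGN is assumed to be 8, and
the object sizes are invented.

/* Sketch only: mimics the fls()-based class check, not the kernel's fls(). */
#include <stdio.h>

static int fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;   /* 1-based index of highest set bit */
}

int main(void)
{
        unsigned int overhead = 8 + 2 * sizeof(unsigned long long);    /* REDZONE_ALIGN + two redzones, assumed */
        unsigned int sizes[] = { 5000, 8100, 8180 };    /* hypothetical sizes >= 4096 */

        for (int i = 0; i < 3; i++) {
                unsigned int s = sizes[i];
                int same_class = fls(s - 1) == fls(s - 1 + overhead);
                printf("size %u: %s\n", s, same_class ?
                       "debug overhead stays in the same power-of-two class" :
                       "would spill into the next class, debug not forced");
        }
        return 0;
}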
@@ -2219,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/*
-	 * Redzoning and user store require word alignment. Note this will be
-	 * overridden by architecture or caller mandated alignment if either
-	 * is greater than BYTES_PER_WORD.
+	 * Redzoning and user store require word alignment or possibly larger.
+	 * Note this will be overridden by architecture or caller mandated
+	 * alignment if either is greater than BYTES_PER_WORD.
 	 */
-	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-		ralign = __alignof__(unsigned long long);
+	if (flags & SLAB_STORE_USER)
+		ralign = BYTES_PER_WORD;
+
+	if (flags & SLAB_RED_ZONE) {
+		ralign = REDZONE_ALIGN;
+		/* If redzoning, ensure that the second redzone is suitably
+		 * aligned, by adjusting the object size accordingly. */
+		size += REDZONE_ALIGN - 1;
+		size &= ~(REDZONE_ALIGN - 1);
+	}
 
 	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2261,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
-		 * the real object.
+		 * the real object. But if the second red zone needs to be
+		 * aligned to 64 bits, we must allow that much space.
 		 */
-		size += BYTES_PER_WORD;
+		if (flags & SLAB_RED_ZONE)
+			size += REDZONE_ALIGN;
+		else
+			size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size