author		David Woodhouse <dwmw2@infradead.org>	2007-05-08 03:22:59 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:14:57 -0400
commit		b46b8f19c9cd435ecac4d9d12b39d78c137ecd66
tree		4b1e393eeb42f70867d30a7d0116ff948941095b
parent		5b94f675f57e4ff16c8fda09088d7480a84dcd91
Increase slab redzone to 64bits
There are two problems with the existing redzone implementation.

Firstly, it's causing misalignment of structures which contain a 64-bit
integer, such as netfilter's 'struct ipt_entry' -- causing netfilter modules
to fail to load because of the misalignment.  (In particular, the first check
in net/ipv4/netfilter/ip_tables.c::check_entry_size_and_hooks())

On ppc32 and sparc32, amongst others, __alignof__(uint64_t) == 8.

With slab debugging, we use 32-bit redzones.  And allocated slab objects
aren't sufficiently aligned to hold a structure containing a uint64_t.

By _just_ setting ARCH_KMALLOC_MINALIGN to __alignof__(u64) we'd disable
redzone checks on those architectures.  By using 64-bit redzones we avoid
that loss of debugging, and also fix the other problem while we're at it.

When investigating this, I noticed that on 64-bit platforms we're using a
32-bit value of RED_ACTIVE/RED_INACTIVE in the 64-bit memory location set
aside for the redzone.  Which means that the four bytes immediately before
or after the allocated object are 0x00,0x00,0x00,0x00 for LE and BE machines,
respectively.  Which is probably not the most useful choice of poison value.

One way to fix both of those at once is just to switch to 64-bit redzones
in all cases.

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
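To make the first problem concrete, here is a minimal userspace sketch (an
editor's illustration, not part of the commit; 'struct entry' is a
hypothetical stand-in for netfilter's ipt_entry).  The slab hands back
suitably aligned memory and the debug object then lives at base + redzone
size, so the width of the redzone word decides the object's alignment:

#include <stdio.h>
#include <stdint.h>

struct entry {			/* stand-in for netfilter's struct ipt_entry */
	uint64_t counter;	/* 8-byte alignment on ppc32/sparc32, per the message */
};

int main(void)
{
	size_t align = __alignof__(struct entry);
	size_t off32 = sizeof(uint32_t);	/* object offset behind a 32-bit redzone */
	size_t off64 = sizeof(uint64_t);	/* object offset behind a 64-bit redzone */

	printf("__alignof__(struct entry) = %zu\n", align);
	printf("32-bit redzone: object at +%zu -> %s\n", off32,
	       off32 % align ? "misaligned" : "aligned");
	printf("64-bit redzone: object at +%zu -> %s\n", off64,
	       off64 % align ? "misaligned" : "aligned");
	return 0;
}

On a target where __alignof__(uint64_t) == 8, the 32-bit case prints
"misaligned" -- exactly the ipt_entry load failure described above, and what
the hunks below fix by widening the redzone to sizeof(unsigned long long).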
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 5920a412b377..1115e2065bfc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -148,10 +148,11 @@
  * Usually, the kmalloc caches are cache_line_size() aligned, except when
  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
- * Note that this flag disables some debug features.
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
  */
-#define ARCH_KMALLOC_MINALIGN 0
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
 #ifndef ARCH_SLAB_MINALIGN
@@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep)
 	return cachep->obj_size;
 }
 
-static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
+	return (unsigned long long*) (objp + obj_offset(cachep) -
+				      sizeof(unsigned long long));
 }
 
-static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long *)(objp + cachep->buffer_size -
-					 2 * BYTES_PER_WORD);
-	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
+		return (unsigned long long *)(objp + cachep->buffer_size -
+					      sizeof(unsigned long long) -
+					      BYTES_PER_WORD);
+	return (unsigned long long *) (objp + cachep->buffer_size -
+				       sizeof(unsigned long long));
 }
 
 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
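/* Editor's sketch of the debug object layout these helpers index into
 * after the change (my reading of the offsets, not text from the commit):
 *
 *   objp                                              objp + buffer_size
 *   |                                                                  |
 *   [ redzone1: u64 ][ object ... ][ redzone2: u64 ][ user word (opt) ]
 *                    ^
 *                    objp + obj_offset(cachep)
 *
 * The trailing user word exists only with SLAB_STORE_USER, which is why
 * dbg_redzone2() backs up an extra BYTES_PER_WORD in that branch.
 */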
@@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #define obj_offset(x)		0
 #define obj_size(cachep)	(cachep->buffer_size)
-#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
-#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
+#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 
 #endif
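/* Editor's note: these !DEBUG stubs lean on GCC statement expressions --
 * ({ stmts; value; }) executes the statements and yields the last value --
 * so each macro can BUG() yet still typecheck as an unsigned long long *
 * at every call site.  The same pattern in isolation (macro name is mine):
 *
 *	#define never_reached()	({BUG(); (unsigned long long *)NULL;})
 */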
@@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 	char *realobj;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
-		printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
+		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
 			*dbg_redzone1(cachep, objp),
 			*dbg_redzone2(cachep, objp));
 	}
@@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * is greater than BYTES_PER_WORD.
 	 */
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-		ralign = BYTES_PER_WORD;
+		ralign = __alignof__(unsigned long long);
 
 	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = align;
 	}
 	/* disable debug if necessary */
-	if (ralign > BYTES_PER_WORD)
+	if (ralign > __alignof__(unsigned long long))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
@@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (flags & SLAB_RED_ZONE) {
 		/* add space for red zone words */
-		cachep->obj_offset += BYTES_PER_WORD;
-		size += 2 * BYTES_PER_WORD;
+		cachep->obj_offset += sizeof(unsigned long long);
+		size += 2 * sizeof(unsigned long long);
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
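/* Editor's arithmetic sketch of the bookkeeping above (the 100-byte size
 * is my illustration, not from the commit).  With SLAB_RED_ZONE and
 * SLAB_STORE_USER on a 32-bit kernel (BYTES_PER_WORD == 4):
 *
 *	obj_offset = 8			redzone1 now fills a whole u64
 *	size = 100 + 2 * 8 = 116	two 64-bit redzones
 *	size = 116 + 4 = 120		STORE_USER word, added by the code
 *					just past this hunk
 *
 * The old 32-bit scheme needed 100 + 2 * 4 + 4 = 112 bytes, so debug
 * overhead grows by 8 bytes per object here.
 */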
@@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp)
 
 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 {
-	unsigned long redzone1, redzone2;
+	unsigned long long redzone1, redzone2;
 
 	redzone1 = *dbg_redzone1(cache, obj);
 	redzone2 = *dbg_redzone2(cache, obj);
@@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 	else
 		slab_error(cache, "memory outside object was overwritten");
 
-	printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
 			obj, redzone1, redzone2);
 }
 
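/* Editor's standalone sketch of the decision behind these two messages.
 * The 64-bit poison constants are the RED_ACTIVE/RED_INACTIVE values this
 * series puts in include/linux/poison.h, quoted from memory -- treat them
 * as illustrative rather than authoritative. */
#include <stdio.h>

#define RED_INACTIVE	0x09F911029D74E35BULL	/* poison while object is free */
#define RED_ACTIVE	0xD84156C5635688C0ULL	/* poison while object is in use */

/* 0 = redzones intact, 1 = double free, 2 = out-of-bounds write */
static int classify_at_free(unsigned long long rz1, unsigned long long rz2)
{
	if (rz1 == RED_ACTIVE && rz2 == RED_ACTIVE)
		return 0;	/* still carries the "in use" poison: fine */
	if (rz1 == RED_INACTIVE && rz2 == RED_INACTIVE)
		return 1;	/* poisoned as free already: double free */
	return 2;		/* anything else: written past the object */
}

int main(void)
{
	printf("intact:      %d\n", classify_at_free(RED_ACTIVE, RED_ACTIVE));
	printf("double free: %d\n", classify_at_free(RED_INACTIVE, RED_INACTIVE));
	printf("overwrite:   %d\n", classify_at_free(RED_ACTIVE, 0x4141414141414141ULL));
	return 0;
}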
@@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		slab_error(cachep, "double free, or memory outside"
 				" object was overwritten");
 		printk(KERN_ERR
-			"%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+			"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
 			objp, *dbg_redzone1(cachep, objp),
 			*dbg_redzone2(cachep, objp));
 	}