author     Christoph Lameter <clameter@sgi.com>                      2007-06-16 13:16:13 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>      2007-06-16 16:16:16 -0400
commit     4b356be019d0c28f67af02809df7072c1c8f7d32 (patch)
tree       03c340e3168a1cae72fd7c96855382ac0c195da6 /include/linux/slub_def.h
parent     8dab5241d06bfc9ee141ea78c56cde5070d7460d (diff)
SLUB: minimum alignment fixes
If ARCH_KMALLOC_MINALIGN is set to a value greater than 8 (SLUB's smallest
kmalloc cache), then SLUB may generate duplicate slabs in sysfs (yes, again)
because the object size is padded to reach ARCH_KMALLOC_MINALIGN. Thus the
sizes of the small slabs all end up the same.

No arch sets ARCH_KMALLOC_MINALIGN larger than 8, though, except mips, which
for some reason wants a 128 byte alignment.

This patch increases the size of the smallest cache if ARCH_KMALLOC_MINALIGN
is greater than 8. In that case more and more of the smallest caches are
disabled.

If we do that, then the count of the active general caches displayed on boot
is no longer correct, since we may skip elements of the kmalloc array. So
count them separately.

This approach was tested by Havard yesterday.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
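To make the padding problem concrete, here is a small userspace sketch (not
kernel code; the ARCH_KMALLOC_MINALIGN value of 128 is assumed, mirroring the
mips case mentioned above). It shows that every power-of-two cache below 128
bytes pads its objects up to 128 bytes, so several sysfs entries would
describe identically sized slabs, and how the patch's KMALLOC_MIN_SIZE
derivation avoids creating those caches in the first place:

/*
 * Userspace illustration only.  ARCH_KMALLOC_MINALIGN = 128 is a
 * hypothetical, mips-like value chosen for demonstration.
 */
#include <stdio.h>

#define ARCH_KMALLOC_MINALIGN 128

/* Mirrors the patch: the smallest cache becomes max(8, ARCH_KMALLOC_MINALIGN). */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

static unsigned int align_up(unsigned int size, unsigned int align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Before the patch: object sizes after padding to the minimum alignment. */
	for (unsigned int size = 8; size <= 256; size <<= 1)
		printf("kmalloc-%-4u padded object size: %u\n",
		       size, align_up(size, ARCH_KMALLOC_MINALIGN));

	printf("smallest cache after the patch: kmalloc-%d\n", KMALLOC_MIN_SIZE);
	return 0;
}

With the assumed alignment of 128, the caches for 8 through 128 bytes all
report a padded object size of 128; with an alignment of 8 they would all
differ and no duplicates would appear.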
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--   include/linux/slub_def.h   13
1 file changed, 11 insertions, 2 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a0ad37463d62..6207a3d8da71 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -28,7 +28,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	unsigned int order;
+	int order;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -56,7 +56,13 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
  */
-#define KMALLOC_SHIFT_LOW 3
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
  * We keep the general caches in an array of slab caches that are used for
@@ -76,6 +82,9 @@ static inline int kmalloc_index(size_t size)
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
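For reference, a minimal userspace sketch of the kmalloc_index() behaviour
after this patch (simplified, and not the kernel header itself; the
KMALLOC_MIN_SIZE of 128 and the maximum size are assumed values). It shows
that every request at or below KMALLOC_MIN_SIZE resolves to
KMALLOC_SHIFT_LOW, so the array slots below that index, including the
96-byte special case, are never reached, which is why the patch counts the
active general caches separately at boot:

/* Userspace illustration only; values below are assumptions, not kernel config. */
#include <stdio.h>

#define KMALLOC_MIN_SIZE  128           /* hypothetical, mips-like */
#define KMALLOC_SHIFT_LOW 7             /* ilog2(KMALLOC_MIN_SIZE) */
#define KMALLOC_MAX_SIZE  (1UL << 25)   /* illustrative upper bound */

static int kmalloc_index(size_t size)
{
	if (size == 0)
		return 0;
	if (size > KMALLOC_MAX_SIZE)
		return -1;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;

	/* Otherwise: index of the smallest power of two >= size. */
	int idx = KMALLOC_SHIFT_LOW;
	size_t s = KMALLOC_MIN_SIZE;
	while (s < size) {
		s <<= 1;
		idx++;
	}
	return idx;
}

int main(void)
{
	size_t sizes[] = { 8, 32, 96, 128, 192, 256, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc_index(%zu) = %d\n", sizes[i], kmalloc_index(sizes[i]));
	return 0;
}

With the assumed minimum size of 128, sizes 8, 32, 96 and 128 all map to
index 7, so only the caches from index 7 upward (plus the 192-byte slot) are
ever instantiated.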