path: root/mm/slub.c
author     Christoph Lameter <clameter@sgi.com>    2007-05-09 05:32:35 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 15:30:44 -0400
commit     65c02d4cfbbd10188ded3d6577922ab034d943ba (patch)
tree       c7b5165775951bb8e78f4e483b69c969042e8208 /mm/slub.c
parent     97416ce82e20a9511ec369822098a8d20998398a (diff)
SLUB: add support for dynamic cacheline size determination
SLUB currently assumes that the cacheline size is static. However, i386, for
example, supports dynamic cache line size determination, so use
cache_line_size() instead of L1_CACHE_BYTES in the allocator.

This also clarifies the purpose of SLAB_HWCACHE_ALIGN: the flag needs to stay
around so that objects can be aligned to the cache line size determined at
boot.

[akpm@linux-foundation.org: need to define it before we use it]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
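For readers unfamiliar with the fallback idiom the patch introduces, here is a
small userspace sketch (not kernel code) of how the new #ifndef block composes
with an architecture-provided definition. SIMULATE_ARCH_OVERRIDE and the
64-byte boot_detected_line_size are made-up stand-ins for an arch header that
derives the line size from boot-time CPU detection.

#include <stdio.h>

#define L1_CACHE_BYTES 128			/* compile-time worst case */

#ifdef SIMULATE_ARCH_OVERRIDE			/* stand-in for an arch header that... */
static int boot_detected_line_size = 64;	/* ...exports a boot-detected value */
#define cache_line_size() boot_detected_line_size
#endif

/* The patch's fallback: only define cache_line_size() if no arch did. */
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif

int main(void)
{
	printf("cache line size seen by the allocator: %d bytes\n",
	       (int)cache_line_size());
	return 0;
}

Built plainly this prints 128; built with -DSIMULATE_ARCH_OVERRIDE it prints
64, because the arch definition wins and the fallback is never emitted. Since
the check happens at preprocessing time, the fallback also has to appear in the
file before the first use of cache_line_size(), which is presumably what the
bracketed akpm note above refers to.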
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5db3da5a60bf..40e92d8d4bc6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,6 +157,11 @@
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 
+/* Not all arches define cache_line_size */
+#ifndef cache_line_size
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
@@ -1480,8 +1485,8 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * then use it.
 	 */
 	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > L1_CACHE_BYTES / 2)
-		return max_t(unsigned long, align, L1_CACHE_BYTES);
+			size > cache_line_size() / 2)
+		return max_t(unsigned long, align, cache_line_size());
 
 	if (align < ARCH_SLAB_MINALIGN)
 		return ARCH_SLAB_MINALIGN;
@@ -1667,8 +1672,8 @@ static int calculate_sizes(struct kmem_cache *s)
 		size += sizeof(void *);
 	/*
 	 * Determine the alignment based on various parameters that the
-	 * user specified (this is unecessarily complex due to the attempt
-	 * to be compatible with SLAB. Should be cleaned up some day).
+	 * user specified and the dynamic determination of cache line size
+	 * on bootup.
 	 */
 	align = calculate_alignment(flags, align, s->objsize);
 
@@ -2280,7 +2285,7 @@ void __init kmem_cache_init(void)
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES,
+		KMALLOC_SHIFT_HIGH, cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
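As a rough illustration of the rule that calculate_alignment() now applies
dynamically, the userspace sketch below mirrors the two checks visible in the
second hunk. The 64-byte line size, the 8-byte minimum standing in for
ARCH_SLAB_MINALIGN, and the omission of the function's final rounding step are
simplifications for illustration, not kernel values.

#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* request cacheline alignment */
#define CACHE_LINE		64UL		/* stand-in for cache_line_size() */
#define SLAB_MINALIGN		8UL		/* stand-in for ARCH_SLAB_MINALIGN */

static unsigned long calc_align(unsigned long flags, unsigned long align,
				unsigned long size)
{
	/*
	 * Cacheline-align only objects big enough that fewer than two would
	 * fit in one line anyway; otherwise fall back to the minimum.
	 */
	if ((flags & SLAB_HWCACHE_ALIGN) && size > CACHE_LINE / 2)
		return align > CACHE_LINE ? align : CACHE_LINE;
	if (align < SLAB_MINALIGN)
		return SLAB_MINALIGN;
	return align;
}

int main(void)
{
	/* 40 > 64/2, so the object is padded out to a 64-byte boundary */
	printf("40-byte object: align %lu\n",
	       calc_align(SLAB_HWCACHE_ALIGN, 0, 40));
	/* 16 <= 64/2 (four objects share a line), so only the minimum applies */
	printf("16-byte object: align %lu\n",
	       calc_align(SLAB_HWCACHE_ALIGN, 0, 16));
	return 0;
}

Run, this prints align 64 for the first call and 8 for the second. The HWalign
figure in the kmem_cache_init() boot message touched by the last hunk is the
same cache_line_size() value that drives this decision.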