author     Christoph Lameter <clameter@sgi.com>                      2007-06-16 13:16:13 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>      2007-06-16 16:16:16 -0400
commit     4b356be019d0c28f67af02809df7072c1c8f7d32
tree       03c340e3168a1cae72fd7c96855382ac0c195da6   /mm/slub.c
parent     8dab5241d06bfc9ee141ea78c56cde5070d7460d
SLUB: minimum alignment fixes
If ARCH_KMALLOC_MINALIGN is set to a value greater than 8 (SLUB's smallest kmalloc cache), then SLUB may generate duplicate slabs in sysfs (yes, again) because the object size is padded to reach ARCH_KMALLOC_MINALIGN. The small slabs therefore all end up the same size.

No arch sets ARCH_KMALLOC_MINALIGN larger than 8, though, except mips, which for some reason wants 128-byte alignment.

This patch increases the size of the smallest cache if ARCH_KMALLOC_MINALIGN is greater than 8. In that case more and more of the smallest caches are disabled.

If we do that, the count of active general caches displayed at boot is no longer correct, since we may skip elements of the kmalloc array. So count them separately.

This approach was tested by Haavard yesterday.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
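To make the failure mode and the fix concrete, here is a minimal userspace sketch, not kernel code. It assumes, as the message above describes, that KMALLOC_MIN_SIZE follows ARCH_KMALLOC_MINALIGN (with 8 as the floor) and that object sizes are padded up to that alignment; the constants, the align_up() helper, and the explicit in-loop skip are illustrative simplifications (the kernel achieves the skipping by increasing the smallest cache size, as the message says).

/*
 * Userspace sketch of the problem described above, not kernel code.
 * Assumption: kmalloc object sizes are padded up to ARCH_KMALLOC_MINALIGN,
 * so with a 128-byte minimum alignment the 8..96-byte caches would all
 * collapse to the same padded size and show up as duplicates in sysfs.
 * The fix gates the small caches on the minimum size and counts only the
 * caches that are actually created.
 */
#include <stdio.h>

#define ARCH_KMALLOC_MINALIGN 128   /* e.g. the mips case from the message */
#define KMALLOC_MIN_SIZE (ARCH_KMALLOC_MINALIGN > 8 ? ARCH_KMALLOC_MINALIGN : 8)
#define KMALLOC_SHIFT_LOW  3        /* 2^3 = 8, SLUB's smallest kmalloc */
#define KMALLOC_SHIFT_HIGH 12       /* illustrative upper bound only */

/* Round size up to the next multiple of align (align is a power of two). */
static unsigned int align_up(unsigned int size, unsigned int align)
{
        return (size + align - 1) & ~(align - 1);
}

int main(void)
{
        int caches = 0;

        /* The odd-sized caches exist only when they are at least as large
         * as the minimum; otherwise they would duplicate a padded cache. */
        if (KMALLOC_MIN_SIZE <= 64) {
                printf("create kmalloc-96  (padded to %u)\n",
                       align_up(96, ARCH_KMALLOC_MINALIGN));
                caches++;
        }
        if (KMALLOC_MIN_SIZE <= 128) {
                printf("create kmalloc-192 (padded to %u)\n",
                       align_up(192, ARCH_KMALLOC_MINALIGN));
                caches++;
        }

        for (int i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                unsigned int size = 1u << i;

                if (size < KMALLOC_MIN_SIZE)
                        continue;   /* skipped: would pad up to a duplicate */
                printf("create kmalloc-%u\n", size);
                caches++;
        }

        /* KMALLOC_SHIFT_HIGH no longer reflects how many caches exist,
         * hence the separate counter reported at boot. */
        printf("Genslabs=%d\n", caches);
        return 0;
}

With ARCH_KMALLOC_MINALIGN at 128, kmalloc-96 is never created and the 8..64-byte power-of-two caches are skipped, which is why the "Genslabs" value printed at boot has to come from the separate caches counter rather than from KMALLOC_SHIFT_HIGH (see the last hunk of the diff below).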
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   |   20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2a1338c516fc..fa28b1623644 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2436,6 +2436,7 @@ EXPORT_SYMBOL(krealloc);
 void __init kmem_cache_init(void)
 {
 	int i;
+	int caches = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -2446,20 +2447,29 @@ void __init kmem_cache_init(void)
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 	kmalloc_caches[0].refcount = -1;
+	caches++;
 #endif
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
+	if (KMALLOC_MIN_SIZE <= 64) {
+		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
+		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 128) {
+		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
+		caches++;
+	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
+		caches++;
+	}
 
 	slab_state = UP;
 
@@ -2476,8 +2486,8 @@ void __init kmem_cache_init(void)
 				nr_cpu_ids * sizeof(struct page *);
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
-		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, cache_line_size(),
+		" CPUs=%d, Nodes=%d\n",
+		caches, cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }