author    Christoph Lameter <cl@linux-foundation.org>  2008-07-03 10:14:26 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>        2008-07-03 12:01:55 -0400
commit    41d54d3bf83f62d3ff5948cb788fe6007e66a0d0 (patch)
tree      7aef950af184b3f9b406127415be1f0b52bca8c7
parent    481c5346d0981940ee63037eb53e4e37b0735c10 (diff)
slub: Do not use 192 byte sized cache if minimum alignment is 128 byte
The 192 byte cache is not necessary if we have a basic alignment of 128 bytes. If it were used, the 192 bytes would be aligned up to the next 128 byte boundary, which would result in another 256 byte cache. Two 256 byte kmalloc caches cause sysfs to complain about a duplicate entry.

MIPS needs 128 byte aligned kmalloc caches and spits out warnings on boot without this patch.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
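A minimal user-space sketch of the arithmetic the message describes, assuming the round-up semantics of the kernel's ALIGN() macro (the program itself is illustrative and not part of the patch):

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* With a minimum alignment of 128 bytes, a 192 byte object is
	 * padded out to the next 128 byte boundary: 256 bytes. */
	printf("192 aligned to 128 -> %d\n", ALIGN(192, 128)); /* 256 */

	/* That collides with the regular kmalloc-256 cache, so sysfs
	 * would see two caches with the same object size. */
	return 0;
}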
-rw-r--r-- include/linux/slub_def.h |  2 ++
-rw-r--r-- mm/slub.c                | 12 +++++++++++-
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 71e43a12ebbb..cef6f8fddd7d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -137,10 +137,12 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
+#if KMALLOC_MIN_SIZE <= 64
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
 		return 2;
+#endif
 	if (size <= 8) return 3;
 	if (size <= 16) return 4;
 	if (size <= 32) return 5;
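A user-space sketch of what the new #if guard changes. The kmalloc_index() reimplementation below is abbreviated and hypothetical (the constants follow the power-of-two pattern of the real function, with KMALLOC_SHIFT_LOW assumed to be log2 of the minimum size):

#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MIN_SIZE 128	/* e.g. MIPS with 128 byte cache lines */

static int kmalloc_index(size_t size)
{
	if (size <= KMALLOC_MIN_SIZE)
		return 7;	/* KMALLOC_SHIFT_LOW: log2(128) */
#if KMALLOC_MIN_SIZE <= 64
	if (size > 64 && size <= 96)
		return 1;	/* the 96 byte cache */
	if (size > 128 && size <= 192)
		return 2;	/* the 192 byte cache */
#endif
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	return -1;		/* larger sizes truncated for the sketch */
}

int main(void)
{
	/* With the guard compiling the 192 byte case out, a 150 byte
	 * request falls through to index 8, the 256 byte cache, so no
	 * duplicate 256 byte cache is ever created. */
	printf("kmalloc_index(150) = %d\n", kmalloc_index(150));
	return 0;
}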
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943c..2c9a62d1f429 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2995,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
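A sketch of the size_index redirection this hunk performs. In SLUB, size_index maps (size - 1) / 8 to a kmalloc cache index for small requests; the starting contents assumed below (index 2 for sizes up to 192) are hypothetical:

#include <stdio.h>

#define KMALLOC_MIN_SIZE 128

static unsigned char size_index[24];	/* covers sizes 1..192 */

int main(void)
{
	int i;

	/* Assumed starting state: sizes up to 192 bytes point at the
	 * 192 byte cache, index 2. */
	for (i = 0; i < 24; i++)
		size_index[i] = 2;

	/* The patch: with a 128 byte minimum alignment, redirect every
	 * size in (128, 192] to index 8, the 256 byte cache. */
	if (KMALLOC_MIN_SIZE == 128)
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;

	/* A 160 byte request now resolves to kmalloc-256. */
	printf("size 160 -> cache index %d\n", size_index[(160 - 1) / 8]);
	return 0;
}

Redirecting through the lookup table rather than creating the 192 byte cache keeps the fast path unchanged while avoiding the duplicate 256 byte sysfs entry.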