author	Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:26 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:01 -0400
commit	f1b263393626fe66bee34ccdbf0487cd377e0213 (patch)
tree	c144c6f8c0fd8f5eeeac1504bf7204c09938135f	/mm/slub.c
parent	dfce8648d64c07eade40d456d59cb4bfcbba008c (diff)
SLUB: faster more efficient slab determination for __kmalloc
kmalloc_index is a long series of comparisons.  The attempt to replace
kmalloc_index with something more efficient like ilog2 failed due to compiler
issues with constant folding on gcc 3.3 / powerpc.

kmalloc_index()'s long list of comparisons works fine for constant folding,
since all the comparisons are optimized away.  However, SLUB also uses
kmalloc_index to determine the slab to use for the __kmalloc_xxx functions.
This leads to a large set of comparisons in get_slab().

The patch here allows us to get rid of that list of comparisons in get_slab():

1. If the requested size is larger than 192 then we can simply use fls to
   determine the slab index, since all larger slabs are of the power-of-two
   type.

2. If the requested size is 192 or smaller then we cannot use fls, since
   there are non-power-of-two caches to be considered.  However, the sizes
   are in a manageable range, so we divide the size by 8.  That leaves only
   24 possibilities, and we simply look up the kmalloc index in a table.

Code size of slub.o decreases by more than 200 bytes through this patch.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
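To illustrate the index determination described above, here is a minimal
userspace sketch.  It is not the kernel code itself: size_index_sketch(),
fls_sketch() and slab_index_sketch() are made-up stand-ins for the kernel's
size_index table, fls() and the patched get_slab() logic.  It maps a requested
size to a kmalloc slab index exactly as the two cases above describe: a table
lookup for sizes up to 192 bytes, fls(size - 1) for the power-of-two slabs
above that.

/* Minimal sketch of the lookup described above; all names here are
 * illustrative stand-ins, not kernel code. */
#include <stdio.h>
#include <stddef.h>

/* kmalloc array index for each size/8 bucket up to 192 bytes */
static const signed char size_index_sketch[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/*   8 ..  64 bytes */
	1, 1, 1, 1, 7, 7, 7, 7,		/*  72 .. 128 bytes */
	2, 2, 2, 2, 2, 2, 2, 2,		/* 136 .. 192 bytes */
};

/* portable stand-in for the kernel's fls(): position of the highest set bit */
static int fls_sketch(unsigned long x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

/* map an allocation size to a kmalloc slab index, mirroring the patched get_slab() */
static int slab_index_sketch(size_t size)
{
	if (size <= 192) {
		if (!size)
			return -1;	/* stands in for the ZERO_SIZE_PTR case */
		return size_index_sketch[(size - 1) / 8];
	}
	return fls_sketch(size - 1);	/* power-of-two slabs above 192 bytes */
}

int main(void)
{
	size_t sizes[] = { 8, 30, 96, 192, 193, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4zu -> kmalloc index %d\n",
		       sizes[i], slab_index_sketch(sizes[i]));
	return 0;
}

For example, a 30-byte request falls in bucket (30 - 1) / 8 = 3 and gets
index 5 (the 32-byte cache), while a 193-byte request gets fls(192) = 8
(the 256-byte cache).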
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	71
1 file changed, 64 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f93adb915c00..71988f9b9c55 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2313,20 +2313,59 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 }
 #endif
 
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
-	int index = kmalloc_index(size);
+	int index;
 
-	if (!index)
-		return ZERO_SIZE_PTR;
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
 
-	/* Allocation too large? */
-	if (index < 0)
-		return NULL;
+		index = size_index[(size - 1) / 8];
+	} else {
+		if (size > KMALLOC_MAX_SIZE)
+			return NULL;
+
+		index = fls(size - 1);
+	}
 
 #ifdef CONFIG_ZONE_DMA
-	if ((flags & SLUB_DMA))
+	if (unlikely((flags & SLUB_DMA)))
 		return dma_kmalloc_cache(index, flags);
+
 #endif
 	return &kmalloc_caches[index];
 }
@@ -2532,6 +2571,24 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
+
+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This is only the case for
+	 * mips it seems. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE;i++)
+		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
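As a rough illustration of the kmem_cache_init() patch-up in the second hunk:
on an architecture where ARCH_KMALLOC_MINALIGN forces a larger minimum object
size (the commit mentions mips), every size_index bucket below that minimum is
redirected to the smallest permitted cache.  The sketch below uses assumed
example values (a 32-byte minimum and a shift of 5), not any real
architecture's configuration, and size_index_sketch() is again a stand-in for
the kernel table.

/* Hedged sketch of the size_index patch-up; the 32-byte minimum and the
 * shift value of 5 are assumed example values, not real kernel config. */
#include <stdio.h>

#define EXAMPLE_KMALLOC_MIN_SIZE	32	/* assumed minimum kmalloc size */
#define EXAMPLE_KMALLOC_SHIFT_LOW	 5	/* log2(32): index of the smallest cache */

static signed char size_index_sketch[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,
	1, 1, 1, 1, 7, 7, 7, 7,
	2, 2, 2, 2, 2, 2, 2, 2,
};

int main(void)
{
	int i;

	/* same loop shape as the hunk above: every bucket below the minimum
	 * size now points at the smallest usable kmalloc cache */
	for (i = 8; i < EXAMPLE_KMALLOC_MIN_SIZE; i++)
		size_index_sketch[(i - 1) / 8] = EXAMPLE_KMALLOC_SHIFT_LOW;

	for (i = 0; i < 24; i++)
		printf("sizes up to %3d bytes -> index %d\n",
		       (i + 1) * 8, size_index_sketch[i]);
	return 0;
}

With these example values the first four buckets (8, 16, 24 and 32 bytes) all
end up at index 5, so every small request is served from the 32-byte cache,
which is exactly what the BUILD_BUG_ON and the loop in the hunk guarantee for
a power-of-two KMALLOC_MIN_SIZE up to 256 bytes.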