author	Christoph Lameter <cl@linux.com>	2013-01-10 14:14:19 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:08 -0500
commit	2c59dd6544212faa5ce761920d2251f4152f408d (patch)
tree	c2547eb50205b72368e0b4758fc7c9a0111238a5 /mm/slab_common.c
parent	9e5e8deca74603357626471a9b44f05dea9e32b1 (diff)
slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into
slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which
also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
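For context, here is a minimal user-space sketch of the lookup being made
common by this patch. It mirrors the size_index[] table and kmalloc_slab()
added in the diff below, but every name here (size_index_sketch, fls_sketch,
kmalloc_index_sketch) is hypothetical, and the kernel's fls() is approximated
with a GCC/Clang builtin. Sizes up to 192 bytes go through a table because
the 96- and 192-byte caches are not powers of two; larger sizes use
fls(size - 1). When size is a compile-time constant, the whole computation
can fold to a constant index, which is the constant-folding improvement the
message above refers to.

#include <stddef.h>

/* Same values as the size_index[] table in the patch: entry (size - 1) / 8
 * holds the kmalloc cache index; 1 and 2 are the 96- and 192-byte caches. */
static signed char size_index_sketch[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/*   8 ..  64 bytes */
	1, 1, 1, 1, 7, 7, 7, 7,	/*  72 .. 128 bytes */
	2, 2, 2, 2, 2, 2, 2, 2,	/* 136 .. 192 bytes */
};

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static inline int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* The mapping kmalloc_slab() performs, minus the cache-pointer lookup. */
static inline int kmalloc_index_sketch(size_t size)
{
	if (size == 0)
		return 0;	/* the kernel returns ZERO_SIZE_PTR here */
	if (size <= 192)
		return size_index_sketch[(size - 1) / 8];
	return fls_sketch((unsigned int)(size - 1));
}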
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c | 105
1 file changed, 103 insertions(+), 2 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2b0ebb6d071d..6d73f0b7f21c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -328,6 +328,68 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif
 
 /*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ */
+struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+{
+	int index;
+
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
+
+		index = size_index[size_index_elem(size)];
+	} else
+		index = fls(size - 1);
+
+#ifdef CONFIG_ZONE_DMA
+	if (unlikely((flags & SLAB_CACHE_DMA)))
+		return kmalloc_dma_caches[index];
+
+#endif
+	return kmalloc_caches[index];
+}
+
+/*
  * Create the kmalloc array. Some of the regular kmalloc arrays
  * may already have been created because they were needed to
  * enable allocations for slab creation.
@@ -336,6 +398,47 @@ void __init create_kmalloc_caches(unsigned long flags)
 {
 	int i;
 
+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This is only the case for
+	 * MIPS it seems. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
+
+	if (KMALLOC_MIN_SIZE >= 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+
+	}
+
+	if (KMALLOC_MIN_SIZE >= 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[size_index_elem(i)] = 8;
+	}
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1])
 		kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
@@ -379,8 +482,6 @@ void __init create_kmalloc_caches(unsigned long flags)
 	}
 #endif
 }
-
-
 #endif /* !CONFIG_SLOB */
 
 
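Beyond the diff itself, a hedged illustration of the size_index patch-up that
create_kmalloc_caches() performs when ARCH_KMALLOC_MINALIGN forces a large
minimum allocation size. The helper name and hard-coded bounds are
assumptions made for the sketch; the kernel writes KMALLOC_SHIFT_LOW into the
table and bounds the loop with ARRAY_SIZE(size_index).

/* Redirect table entries that point at caches which cannot exist when
 * the minimum kmalloc size/alignment is large (64 or 128, e.g. on MIPS). */
static void patch_size_index_sketch(signed char *tbl, size_t min_size,
				    int shift_low)
{
	size_t i;

	/* Every size below the minimum lands in the smallest real cache. */
	for (i = 8; i < min_size; i += 8) {
		size_t elem = (i - 1) / 8;

		if (elem >= 24)		/* table covers sizes up to 192 only */
			break;
		tbl[elem] = shift_low;
	}

	/* The 96-byte cache is unusable at 64-byte alignment: use 128. */
	if (min_size >= 64)
		for (i = 72; i <= 96; i += 8)
			tbl[(i - 1) / 8] = 7;

	/* The 192-byte cache is unusable at 128-byte alignment: use 256. */
	if (min_size >= 128)
		for (i = 136; i <= 192; i += 8)
			tbl[(i - 1) / 8] = 8;
}

A quick harness (again hypothetical) tying the two sketches together:

#include <stdio.h>

int main(void)
{
	/* Defaults: 24 -> 5 (32B cache), 100 -> 7 (128B), 200 -> 8 (256B). */
	printf("%d %d %d\n", kmalloc_index_sketch(24),
	       kmalloc_index_sketch(100), kmalloc_index_sketch(200));

	/* With a 64-byte minimum and KMALLOC_SHIFT_LOW == 6, 24 bytes now
	 * maps to the 64-byte cache and 72 bytes to the 128-byte cache. */
	patch_size_index_sketch(size_index_sketch, 64, 6);
	printf("%d %d\n", kmalloc_index_sketch(24), kmalloc_index_sketch(72));
	return 0;
}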