author	Christoph Lameter <cl@linux.com>	2013-01-10 14:14:19 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:08 -0500
commit	2c59dd6544212faa5ce761920d2251f4152f408d (patch)
tree	c2547eb50205b72368e0b4758fc7c9a0111238a5 /mm/slab.c
parent	9e5e8deca74603357626471a9b44f05dea9e32b1 (diff)
slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into
slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which
also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
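[Editor's note] For context, a minimal sketch of what the shared lookup amounts to, modeled on the __find_general_cachep() variant removed from slab in the hunk below. The names kmalloc_slab(), kmalloc_index(), kmalloc_caches[] and kmalloc_dma_caches[] are taken from the diff itself; the actual helper in slab_common.c may differ in detail (for example, slub's optimized version adds a fast path for small sizes):

struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (!size)
		return ZERO_SIZE_PTR;	/* callers test with ZERO_OR_NULL_PTR() */

	/* Map the requested size to the index of the matching kmalloc cache. */
	index = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
	/* DMA allocations get their own set of caches. */
	if (unlikely(flags & GFP_DMA))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}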
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	40
1 file changed, 3 insertions(+), 37 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 08ba44f81a28..62629b11df38 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -656,40 +656,6 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline struct kmem_cache *__find_general_cachep(size_t size,
-							gfp_t gfpflags)
-{
-	int i;
-
-#if DEBUG
-	/* This happens if someone tries to call
-	 * kmem_cache_create(), or __kmalloc(), before
-	 * the generic caches are initialized.
-	 */
-	BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
-#endif
-	if (!size)
-		return ZERO_SIZE_PTR;
-
-	i = kmalloc_index(size);
-
-	/*
-	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
-	 * has cs_{dma,}cachep==NULL. Thus no special case
-	 * for large kmalloc calls required.
-	 */
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely(gfpflags & GFP_DMA))
-		return kmalloc_dma_caches[i];
-#endif
-	return kmalloc_caches[i];
-}
-
-static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
-{
-	return __find_general_cachep(size, gfpflags);
-}
-
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@ -2426,7 +2392,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
-		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+		cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
 		/*
 		 * This is a possibility for one of the malloc_sizes caches.
 		 * But since we go off slab only for object size greater than
@@ -3729,7 +3695,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
@@ -3774,7 +3740,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	 * Then kmalloc uses the uninlined functions instead of the inline
 	 * functions.
 	 */
-	cachep = __find_general_cachep(size, flags);
+	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);