author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-12-02 03:49:39 -0500
committer	Pekka Enberg <penberg@kernel.org>	2014-02-08 05:10:32 -0500
commit		9cef2e2b6589406562bf12a9a633d7d7630340a1 (patch)
tree		1d3d05a81c2a786451be20a97f09ea13ae70b81b /mm
parent		38dbfb59d1175ef458d006556061adeaa8751b72 (diff)
slab: factor out calculate nr objects in cache_estimate
This logic is not simple to understand, so factor it out into a separate function to help readability. Additionally, the following patch will build on this change to let the freelist use a differently sized index depending on the number of objects.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index b264214c77ea..f81176dd0a90 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,9 +565,31 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static size_t slab_mgmt_size(size_t nr_objs, size_t align)
+static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
+				size_t idx_size, size_t align)
 {
-	return ALIGN(nr_objs * sizeof(unsigned int), align);
+	int nr_objs;
+	size_t freelist_size;
+
+	/*
+	 * Ignore padding for the initial guess. The padding
+	 * is at most @align-1 bytes, and @buffer_size is at
+	 * least @align. In the worst case, this result will
+	 * be one greater than the number of objects that fit
+	 * into the memory allocation when taking the padding
+	 * into account.
+	 */
+	nr_objs = slab_size / (buffer_size + idx_size);
+
+	/*
+	 * This calculated number will be either the right
+	 * amount, or one greater than what we want.
+	 */
+	freelist_size = slab_size - nr_objs * buffer_size;
+	if (freelist_size < ALIGN(nr_objs * idx_size, align))
+		nr_objs--;
+
+	return nr_objs;
 }
 
 /*
@@ -600,25 +622,9 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 		nr_objs = slab_size / buffer_size;
 
 	} else {
-		/*
-		 * Ignore padding for the initial guess. The padding
-		 * is at most @align-1 bytes, and @buffer_size is at
-		 * least @align. In the worst case, this result will
-		 * be one greater than the number of objects that fit
-		 * into the memory allocation when taking the padding
-		 * into account.
-		 */
-		nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
-
-		/*
-		 * This calculated number will be either the right
-		 * amount, or one greater than what we want.
-		 */
-		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
-			> slab_size)
-			nr_objs--;
-
-		mgmt_size = slab_mgmt_size(nr_objs, align);
+		nr_objs = calculate_nr_objs(slab_size, buffer_size,
+					sizeof(unsigned int), align);
+		mgmt_size = ALIGN(nr_objs * sizeof(unsigned int), align);
 	}
 	*num = nr_objs;
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
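
To make the off-by-one correction in calculate_nr_objs() concrete, here is a minimal userspace sketch of the same arithmetic. This is not part of the patch: ALIGN() is re-defined locally, and the slab, object, index and alignment sizes are made-up illustrative values.

#include <stdio.h>
#include <stddef.h>

/* Local stand-in for the kernel's ALIGN() macro. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Same arithmetic as the calculate_nr_objs() introduced above. */
static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
			     size_t idx_size, size_t align)
{
	int nr_objs;
	size_t freelist_size;

	/* Initial guess, ignoring padding of the freelist index array. */
	nr_objs = slab_size / (buffer_size + idx_size);

	/*
	 * If the space left after the objects cannot hold the aligned
	 * index array, the guess was one too high; drop one object.
	 */
	freelist_size = slab_size - nr_objs * buffer_size;
	if (freelist_size < ALIGN(nr_objs * idx_size, align))
		nr_objs--;

	return nr_objs;
}

int main(void)
{
	/* Illustrative numbers: 4096-byte slab, 120-byte objects,
	 * 4-byte indices, 64-byte alignment. */
	size_t slab_size = 4096, buffer_size = 120, idx_size = 4, align = 64;
	int n = calculate_nr_objs(slab_size, buffer_size, idx_size, align);

	/* Initial guess is 4096 / 124 = 33, but 33 objects leave only
	 * 136 bytes for a 192-byte aligned index array, so n ends up 32. */
	printf("objects per slab: %d\n", n);
	printf("left over: %zu bytes\n",
	       slab_size - n * buffer_size - ALIGN(n * idx_size, align));
	return 0;
}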