author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-03-15 17:54:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 19:55:16 -0400
commit		70f75067b15659bb03404e75eded41011c67dc57 (patch)
tree		62e7fc5d7802b82d1953be3b884a36aa860cbd49
parent		b03a017bebc403d40aa53a092e79b3020786537d (diff)
mm/slab: avoid returning values by reference
Returning values by reference is bad practice.  Instead, just use the
function return value.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Suggested-by: Christoph Lameter <cl@linux.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
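The pattern is easy to see in isolation. Below is a minimal standalone
sketch of the refactor this patch applies to cache_estimate(): the
estimate_old()/estimate_new() helpers are hypothetical stand-ins for
illustration, not the kernel functions themselves.

#include <stdio.h>

/* Before: every result comes back through an output pointer. */
static void estimate_old(size_t slab_size, size_t obj_size,
			 size_t *left_over, unsigned int *num)
{
	*num = (unsigned int)(slab_size / obj_size);
	*left_over = slab_size % obj_size;
}

/*
 * After: the primary result (the object count) is the function's
 * return value; only the secondary left-over byte count is still
 * passed back by reference.
 */
static unsigned int estimate_new(size_t slab_size, size_t obj_size,
				 size_t *left_over)
{
	unsigned int num = (unsigned int)(slab_size / obj_size);

	*left_over = slab_size % obj_size;
	return num;
}

int main(void)
{
	size_t left_over;
	unsigned int num;

	estimate_old(4096, 96, &left_over, &num);
	printf("old: num=%u left_over=%zu\n", num, left_over);

	num = estimate_new(4096, 96, &left_over);
	printf("new: num=%u left_over=%zu\n", num, left_over);
	return 0;
}

Returning the primary result directly lets the caller assign it at the
point of use (num = estimate_new(...)) instead of threading an extra
output pointer through the call, which is exactly how calculate_slab_order()
consumes the new cache_estimate() below.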
-rw-r--r--	mm/slab.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 85e394f5918c..4f4e6472db5b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -460,9 +460,10 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 /*
  * Calculate the number of objects and left-over bytes for a given buffer size.
  */
-static void cache_estimate(unsigned long gfporder, size_t buffer_size,
-		unsigned long flags, size_t *left_over, unsigned int *num)
+static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
+		unsigned long flags, size_t *left_over)
 {
+	unsigned int num;
 	size_t slab_size = PAGE_SIZE << gfporder;
 
 	/*
@@ -483,13 +484,15 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 * correct alignment when allocated.
 	 */
 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
-		*num = slab_size / buffer_size;
+		num = slab_size / buffer_size;
 		*left_over = slab_size % buffer_size;
 	} else {
-		*num = slab_size / (buffer_size + sizeof(freelist_idx_t));
+		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 		*left_over = slab_size %
 			(buffer_size + sizeof(freelist_idx_t));
 	}
+
+	return num;
 }
 
 #if DEBUG
@@ -1893,7 +1896,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	unsigned int num;
 	size_t remainder;
 
-	cache_estimate(gfporder, size, flags, &remainder, &num);
+	num = cache_estimate(gfporder, size, flags, &remainder);
 	if (!num)
 		continue;
 