author     Joonsoo Kim <iamjoonsoo.kim@lge.com>  2016-03-15 17:54:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit     832a15d209cd260180407bde1af18965b21623f3 (patch)
tree       d94dbf93e5b2be7b868327659acbeb8c93ebae91 /mm/slab.c
parent     2e6b3602168797fd4d80d86d208c4ba8fcfa3b8b (diff)
mm/slab: align cache size first before determination of OFF_SLAB candidate
Finding a suitable OFF_SLAB candidate depends on the aligned cache size,
not the original size. The same reasoning applies to the debug pagealloc
candidate. So, this patch moves the alignment fixup up to its proper
position; from that point on, size is already aligned, so some later
alignment fixups can be removed.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
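To illustrate the reasoning, here is a minimal userspace sketch (not kernel
code; the page size, alignment, and object size below are hypothetical
values) of the rounding the kernel's ALIGN() macro performs. A raw size can
fit under PAGE_SIZE while its aligned size does not, which is why the debug
pagealloc check in the second hunk below has to see the aligned value:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN() (power-of-two alignment). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long page_size = 4096;  /* stand-in for PAGE_SIZE */
        unsigned long align = 128;       /* hypothetical cachep->align */
        unsigned long size = 4050;       /* hypothetical object size */

        /* The raw size fits in a page, but the aligned size does not,
         * so the debug pagealloc check must test the aligned value. */
        printf("raw %lu < page: %d\n", size, size < page_size);
        printf("aligned %lu < page: %d\n",
               ALIGN(size, align), ALIGN(size, align) < page_size);
        return 0;
}

With these values the raw check passes while the aligned check fails, so
deciding on the unaligned size would misclassify the cache; aligning size
once, up front, makes the later plain comparisons correct.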
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index b3d91b048f44..d5dffc806f82 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2125,6 +2125,17 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		else
 			size += BYTES_PER_WORD;
 	}
+#endif
+
+	size = ALIGN(size, cachep->align);
+	/*
+	 * We should restrict the number of objects in a slab to implement
+	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+	 */
+	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
+
+#if DEBUG
 	/*
 	 * To activate debug pagealloc, off-slab management is necessary
 	 * requirement. In early phase of initialization, small sized slab
@@ -2135,8 +2146,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
 	    !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 	    size >= 256 && cachep->object_size > cache_line_size() &&
-	    ALIGN(size, cachep->align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+	    size < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - size;
 		size = PAGE_SIZE;
 	}
 #endif
@@ -2148,20 +2159,13 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
 	 */
 	if (size >= OFF_SLAB_MIN_SIZE && !slab_early_init &&
-	    !(flags & SLAB_NOLEAKTRACE))
+	    !(flags & SLAB_NOLEAKTRACE)) {
 		/*
 		 * Size is large, assume best to place the slab management obj
 		 * off-slab (should allow better packing of objs).
 		 */
 		flags |= CFLGS_OFF_SLAB;
+	}
 
-	size = ALIGN(size, cachep->align);
-	/*
-	 * We should restrict the number of objects in a slab to implement
-	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
-	 */
-	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
-		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
-
 	left_over = calculate_slab_order(cachep, size, flags);
 
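The OFF_SLAB hunk follows the same alignment-first logic. A minimal
userspace sketch of the post-patch decision shape (OFF_SLAB_MIN_SIZE here
is a hypothetical stand-in for the kernel's threshold, and the sizes are
made up):

#include <stdio.h>
#include <stdbool.h>

/* Same rounding as the kernel's ALIGN() (power-of-two alignment). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical stand-in for the kernel's OFF_SLAB_MIN_SIZE threshold. */
#define OFF_SLAB_MIN_SIZE 512UL

/* Post-patch shape of the decision: align first, then test the
 * already-aligned size, as the third hunk above does. */
static bool off_slab_candidate(unsigned long size, unsigned long align)
{
        size = ALIGN(size, align);
        return size >= OFF_SLAB_MIN_SIZE;
}

int main(void)
{
        printf("raw:     %d\n", 500UL >= OFF_SLAB_MIN_SIZE);       /* 0 */
        printf("aligned: %d\n", off_slab_candidate(500UL, 64UL));  /* 1 */
        return 0;
}

A 500-byte object at 64-byte alignment occupies 512 bytes per slot, so
only the aligned size crosses the threshold; aligning before the check is
what lets the patched code drop the inline ALIGN() fixups.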