author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-12-02 03:49:41 -0500
committer	Pekka Enberg <penberg@kernel.org>	2014-02-08 05:12:06 -0500
commit		f315e3fa1cf5b3317fc948708645fff889ce1e63 (patch)
tree		2c7e3db6b372504ca115c5dcd2b23a8996e4a06a /mm
parent		e5c58dfdcbd36f6b4c4c92c31cf6753d22da630a (diff)
slab: restrict the number of objects in a slab
To prepare for implementing a byte-sized index for managing the freelist of a slab, we should restrict the number of objects in a slab to at most 256, since a byte can only represent 256 distinct values. Setting the object size to a value equal to or greater than the newly introduced SLAB_OBJ_MIN_SIZE ensures that the number of objects in a slab is at most 256 for a slab with 1 page.

If the page size is larger than 4096, the above assumption would be wrong; in that case, we fall back on a 2-byte index. If the minimum kmalloc size is less than 16, we use it as the minimum object size and give up this optimization.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
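As a rough illustration (not part of the patch), the arithmetic behind the 256-object bound is simple; in this standalone C sketch, the PAGE_SIZE and SLAB_OBJ_MIN_SIZE values are assumed examples for a 4 KiB-page configuration:

	/*
	 * Illustration only, not kernel code: with 4 KiB pages and a
	 * 16-byte minimum object size, a one-page slab can never hold
	 * more than 256 objects, so a byte-wide freelist index suffices.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SIZE		4096	/* assumed 4 KiB page */
	#define SLAB_OBJ_MIN_SIZE	16	/* minimum object size from the patch */

	int main(void)
	{
		unsigned int max_objs = PAGE_SIZE / SLAB_OBJ_MIN_SIZE;

		/* 4096 / 16 = 256: every index fits in an unsigned char */
		assert(max_objs <= 256);
		printf("at most %u objects per one-page slab\n", max_objs);
		return 0;
	}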
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  21
1 file changed, 21 insertions, 0 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 878354b26b72..9d4c7b50dfdc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -157,6 +157,17 @@
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
 
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
+
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
+
+#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+
 /*
  * true if a page was allocated from pfmemalloc reserves for network-based
  * swap
@@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
+		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
+		if (num > SLAB_OBJ_MAX_NUM)
+			break;
+
 		if (flags & CFLGS_OFF_SLAB) {
 			/*
 			 * Max number of objs-per-slab for caches which
@@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		flags |= CFLGS_OFF_SLAB;
 
 	size = ALIGN(size, cachep->align);
+	/*
+	 * We should restrict the number of objects in a slab to implement
+	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+	 */
+	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
 
 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
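For reference, the FREELIST_BYTE_INDEX selection introduced above can be exercised in a standalone sketch; the page sizes passed below are assumed examples, not values taken from any particular architecture:

	/*
	 * Standalone sketch of the FREELIST_BYTE_INDEX test from the
	 * patch; page sizes in main() are illustrative assumptions.
	 */
	#include <stdio.h>

	#define BITS_PER_BYTE		8
	#define SLAB_OBJ_MIN_SIZE	16

	static void pick_index_width(unsigned long page_size)
	{
		/* A byte index is safe iff SLAB_OBJ_MIN_SIZE-byte objects
		 * still yield at most 256 objects in a single page. */
		int byte_index = (page_size >> BITS_PER_BYTE) <= SLAB_OBJ_MIN_SIZE;

		printf("PAGE_SIZE %6lu -> %s freelist index\n",
		       page_size, byte_index ? "1-byte" : "2-byte");
	}

	int main(void)
	{
		pick_index_width(4096);		/* 4096 >> 8 = 16 <= 16: byte index */
		pick_index_width(65536);	/* 65536 >> 8 = 256 > 16: short index */
		return 0;
	}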