author     Joonsoo Kim <iamjoonsoo.kim@lge.com>  2013-12-02 03:49:41 -0500
committer  Pekka Enberg <penberg@kernel.org>     2014-02-08 05:12:06 -0500
commit     f315e3fa1cf5b3317fc948708645fff889ce1e63 (patch)
tree       2c7e3db6b372504ca115c5dcd2b23a8996e4a06a
parent     e5c58dfdcbd36f6b4c4c92c31cf6753d22da630a (diff)
slab: restrict the number of objects in a slab
To prepare for implementing a byte-sized index for managing the freelist
of a slab, we should restrict the number of objects in a slab to at most
256, since a byte can only represent 256 distinct values.

Setting the object size to at least the newly introduced SLAB_OBJ_MIN_SIZE
ensures that the number of objects in a single-page slab never exceeds 256.

If the page size is larger than 4096 bytes, that assumption no longer
holds; in that case, we fall back to a 2-byte index.

If the minimum kmalloc size is less than 16 bytes, we use it as the
minimum object size and give up this optimization.
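
For concreteness, the arithmetic behind these limits can be checked with a
small user-space sketch (not part of this patch; a 4096-byte page is
assumed):

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;         /* assumed 4 KiB page */
        unsigned long byte_values = 1UL << 8;   /* a byte encodes 2^8 values */

        /* Smallest object size keeping a one-page slab at <= 256 objects. */
        unsigned long min_size = page_size / byte_values;

        printf("max objects per one-page slab: %lu\n", byte_values); /* 256 */
        printf("minimum object size: %lu\n", min_size);              /* 16 */
        return 0;
}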
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--  include/linux/slab.h | 11 +++++++++++
-rw-r--r--  mm/slab.c            | 21 +++++++++++++++++++++
2 files changed, 32 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abdd67df..d015dec02bf3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -201,6 +201,17 @@ struct kmem_cache {
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	5
 #endif
+
+/*
+ * This restriction comes from byte sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * byte sized index which can represent 2^8 entries, the size of the object
+ * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
+ * If minimum size of kmalloc is less than 16, we use it as minimum object
+ * size and give up to use byte sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE	(KMALLOC_SHIFT_LOW < 4 ? \
+				(1 << KMALLOC_SHIFT_LOW) : 16)
 #endif
 
 #ifdef CONFIG_SLUB
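
The two branches of SLAB_OBJ_MIN_SIZE can be exercised outside the kernel;
the values of KMALLOC_SHIFT_LOW below are illustrative (the kernel sets it
per architecture):

#include <assert.h>

/* Same expression as the patch, parameterized for demonstration. */
#define OBJ_MIN_SIZE(shift_low) \
        ((shift_low) < 4 ? (1 << (shift_low)) : 16)

int main(void)
{
        /* Minimum kmalloc size 2^3 = 8 is below 16: use 8 as the floor
         * and give up the byte-sized index optimization. */
        assert(OBJ_MIN_SIZE(3) == 8);

        /* Minimum kmalloc size 2^5 = 32 is at least 16: the 16-byte
         * floor keeps a 4 KiB slab at no more than 256 objects. */
        assert(OBJ_MIN_SIZE(5) == 16);
        return 0;
}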
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -157,6 +157,17 @@
 #define	ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
 
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
+
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
+
+#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+
 /*
  * true if a page was allocated from pfmemalloc reserves for network-based
  * swap
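
How the index type falls out of these macros can be reproduced in user
space; this sketch assumes a 4 KiB PAGE_SIZE and the 16-byte minimum, so
the byte-sized index is selected:

#include <stdio.h>

#define BITS_PER_BYTE     8
#define PAGE_SIZE         4096   /* assumed */
#define SLAB_OBJ_MIN_SIZE 16     /* 4 KiB page, KMALLOC_SHIFT_LOW >= 4 */

/* Mirrors the patch: 4096 >> 8 == 16 <= 16, so a byte index suffices. */
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
                                <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)

int main(void)
{
        printf("index size: %zu byte(s)\n", sizeof(freelist_idx_t)); /* 1 */
        printf("max objects: %d\n", SLAB_OBJ_MAX_NUM);               /* 256 */
        return 0;
}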
@@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
+		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
+		if (num > SLAB_OBJ_MAX_NUM)
+			break;
+
 		if (flags & CFLGS_OFF_SLAB) {
 			/*
 			 * Max number of objs-per-slab for caches which
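
The effect of this check can be sketched with a simplified model;
objs_per_slab() below is illustrative only and ignores freelist and
management overhead:

#include <stdio.h>

#define SLAB_OBJ_MAX_NUM 256   /* byte-sized index case */

/* Illustrative: objects that fit in a 2^order-page slab (4 KiB pages). */
static unsigned int objs_per_slab(unsigned int order, size_t size)
{
        return (unsigned int)(((size_t)4096 << order) / size);
}

int main(void)
{
        size_t size = 16;
        unsigned int order;

        for (order = 0; order <= 3; order++) {
                unsigned int num = objs_per_slab(order, size);

                /* Larger orders only increase num, so stop searching once
                 * the index type can no longer address every object. */
                if (num > SLAB_OBJ_MAX_NUM) {
                        printf("order %u: %u objects -- too many, stop\n",
                               order, num);
                        break;
                }
                printf("order %u: %u objects -- ok\n", order, num);
        }
        return 0;
}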
@@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		flags |= CFLGS_OFF_SLAB;
 
 	size = ALIGN(size, cachep->align);
+	/*
+	 * We should restrict the number of objects in a slab to implement
+	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+	 */
+	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
 
 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
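
The bump-up interacts with alignment; the ALIGN() below mirrors the
kernel's round-up-to-boundary semantics, and the object size and alignment
are assumed values for illustration:

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * matching the kernel's ALIGN() semantics. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

#define SLAB_OBJ_MIN_SIZE   16
#define FREELIST_BYTE_INDEX 1   /* assume the byte-index configuration */

int main(void)
{
        size_t size = 8, align = 8;   /* assumed cache parameters */

        size = ALIGN(size, align);
        if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
                size = ALIGN(SLAB_OBJ_MIN_SIZE, align);

        /* An 8-byte object is padded to 16, so a one-page slab never
         * holds more than 256 objects. */
        printf("effective size: %zu\n", size);   /* 16 */
        return 0;
}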