author    Lai Jiangshan <laijs@cn.fujitsu.com>    2011-03-10 02:21:48 -0500
committer Pekka Enberg <penberg@kernel.org>      2011-03-11 11:06:34 -0500
commit    ab9a0f196f2f4f080df54402493ea3dc31b5243e (patch)
tree      849ce2a8bf2c2574b8ea9a368873aebfb25186b0 /mm/slub.c
parent    a5abba989deceb731047425812d268daf7536575 (diff)
slub: automatically reserve bytes at the end of slab
There is no "struct" for slub's slab, it shares with struct page. But struct page is very small, it is insufficient when we need to add some metadata for slab. So we add a field "reserved" to struct kmem_cache, when a slab is allocated, kmem_cache->reserved bytes are automatically reserved at the end of the slab for slab's metadata. Changed from v1: Export the reserved field via sysfs Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  47
1 file changed, 30 insertions, 17 deletions
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f193c9..d3d17677bab5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,11 +281,16 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
         return (p - addr) / s->size;
 }
 
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+        return ((PAGE_SIZE << order) - reserved) / size;
+}
+
 static inline struct kmem_cache_order_objects oo_make(int order,
-                                unsigned long size)
+                                unsigned long size, int reserved)
 {
         struct kmem_cache_order_objects x = {
                 (order << OO_SHIFT) + (PAGE_SIZE << order) / size
-                (order << OO_SHIFT) + (PAGE_SIZE << order) / size
+                (order << OO_SHIFT) + order_objects(order, size, reserved)
         };
 
         return x;
@@ -617,7 +622,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                 return 1;
 
         start = page_address(page);
-        length = (PAGE_SIZE << compound_order(page));
+        length = (PAGE_SIZE << compound_order(page)) - s->reserved;
         end = start + length;
         remainder = length % s->size;
         if (!remainder)
@@ -698,7 +703,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
                 return 0;
         }
 
-        maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+        maxobj = order_objects(compound_order(page), s->size, s->reserved);
         if (page->objects > maxobj) {
                 slab_err(s, page, "objects %u > max %u",
                         s->name, page->objects, maxobj);
@@ -748,7 +753,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                 nr++;
         }
 
-        max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+        max_objects = order_objects(compound_order(page), s->size, s->reserved);
         if (max_objects > MAX_OBJS_PER_PAGE)
                 max_objects = MAX_OBJS_PER_PAGE;
 
@@ -1988,13 +1993,13 @@ static int slub_nomerge;
  * the smallest order which will fit the object.
  */
 static inline int slab_order(int size, int min_objects,
-                                int max_order, int fract_leftover)
+                                int max_order, int fract_leftover, int reserved)
 {
         int order;
         int rem;
         int min_order = slub_min_order;
 
-        if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+        if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
                 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
         for (order = max(min_order,
@@ -2003,10 +2008,10 @@ static inline int slab_order(int size, int min_objects,
 
                 unsigned long slab_size = PAGE_SIZE << order;
 
-                if (slab_size < min_objects * size)
+                if (slab_size < min_objects * size + reserved)
                         continue;
 
-                rem = slab_size % size;
+                rem = (slab_size - reserved) % size;
 
                 if (rem <= slab_size / fract_leftover)
                         break;
@@ -2016,7 +2021,7 @@ static inline int slab_order(int size, int min_objects,
         return order;
 }
 
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
 {
         int order;
         int min_objects;
@@ -2034,14 +2039,14 @@ static inline int calculate_order(int size)
         min_objects = slub_min_objects;
         if (!min_objects)
                 min_objects = 4 * (fls(nr_cpu_ids) + 1);
-        max_objects = (PAGE_SIZE << slub_max_order)/size;
+        max_objects = order_objects(slub_max_order, size, reserved);
         min_objects = min(min_objects, max_objects);
 
         while (min_objects > 1) {
                 fraction = 16;
                 while (fraction >= 4) {
                         order = slab_order(size, min_objects,
-                                        slub_max_order, fraction);
+                                        slub_max_order, fraction, reserved);
                         if (order <= slub_max_order)
                                 return order;
                         fraction /= 2;
@@ -2053,14 +2058,14 @@ static inline int calculate_order(int size)
          * We were unable to place multiple objects in a slab. Now
          * lets see if we can place a single object there.
          */
-        order = slab_order(size, 1, slub_max_order, 1);
+        order = slab_order(size, 1, slub_max_order, 1, reserved);
         if (order <= slub_max_order)
                 return order;
 
         /*
          * Doh this slab cannot be placed using slub_max_order.
          */
-        order = slab_order(size, 1, MAX_ORDER, 1);
+        order = slab_order(size, 1, MAX_ORDER, 1, reserved);
         if (order < MAX_ORDER)
                 return order;
         return -ENOSYS;
@@ -2311,7 +2316,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         if (forced_order >= 0)
                 order = forced_order;
         else
-                order = calculate_order(size);
+                order = calculate_order(size, s->reserved);
 
         if (order < 0)
                 return 0;
@@ -2329,8 +2334,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         /*
          * Determine the number of objects per slab
          */
-        s->oo = oo_make(order, size);
-        s->min = oo_make(get_order(size), size);
+        s->oo = oo_make(order, size, s->reserved);
+        s->min = oo_make(get_order(size), size, s->reserved);
         if (oo_objects(s->oo) > oo_objects(s->max))
                 s->max = s->oo;
 
@@ -2349,6 +2354,7 @@ static int kmem_cache_open(struct kmem_cache *s,
         s->objsize = size;
         s->align = align;
         s->flags = kmem_cache_flags(size, flags, name, ctor);
+        s->reserved = 0;
 
         if (!calculate_sizes(s, -1))
                 goto error;
@@ -4017,6 +4023,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+        return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
 #ifdef CONFIG_SLUB_DEBUG
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
@@ -4303,6 +4315,7 @@ static struct attribute *slab_attrs[] = {
         &reclaim_account_attr.attr,
         &destroy_by_rcu_attr.attr,
         &shrink_attr.attr,
+        &reserved_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
         &total_objects_attr.attr,
         &slabs_attr.attr,
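[Editor's note] The patch itself always initialises s->reserved to 0 in kmem_cache_open(); a cache that wants per-slab metadata would set the field before calculate_sizes() runs. As a rough, hedged illustration of the resulting layout (user-space C, with malloc() standing in for one slab page and an invented metadata struct), the reserved bytes sit in the tail of the slab, past the last object:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL                  /* assumed page size, as in the example above */

    struct slab_meta {                        /* hypothetical per-slab metadata */
            unsigned long generation;
    };

    int main(void)
    {
            unsigned long size = 256;                       /* object size */
            int reserved = sizeof(struct slab_meta);        /* what ->reserved would hold */
            int objects = (PAGE_SIZE - reserved) / size;    /* order_objects(0, size, reserved) */
            unsigned char *slab = malloc(PAGE_SIZE);        /* stand-in for one order-0 slab */

            if (!slab)
                    return 1;

            /* The metadata occupies the reserved tail, beyond the last object. */
            struct slab_meta *meta = (struct slab_meta *)(slab + PAGE_SIZE - reserved);
            meta->generation = 1;

            printf("%d objects end at offset %lu; metadata starts at offset %lu\n",
                   objects, objects * size, PAGE_SIZE - reserved);
            free(slab);
            return 0;
    }

This is also why slab_pad_check() above now subtracts s->reserved from the slab length: the reserved tail is metadata, not padding, so it is excluded from the padding check.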