author	Christoph Lameter <clameter@sgi.com>	2008-04-14 12:11:41 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 11:28:18 -0400
commit	06b285dc3d6194abe79ab9dcaaab703d6f75627c (patch)
tree	4443514e99740054028f6eb035c796a74a540ab1
parent	319d1e240683d37924ea8977c91730c3393fd453 (diff)
slub: Make the order configurable for each slab cache
Makes /sys/kernel/slab/<slabname>/order writable. The allocation order of a slab cache can then be changed dynamically at runtime. This can be used to override the objects-per-slab value established from the slub_min_objects setting that was specified manually or calculated on bootup.

A change of the slab order can occur while allocate_slab() runs. allocate_slab() needs both the order and the number of objects per slab, and both change when the order changes. They are therefore packed into a single word (struct kmem_cache_order_objects) so that they can be updated and retrieved atomically.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
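The packing mentioned in the message is what makes the order safely writable while allocations are in flight: a reader such as allocate_slab() must never see an order that disagrees with the object count. Below is a minimal user-space sketch of that idea, assuming a 16-bit split and a fixed 4096-byte page size for illustration; only the struct name and oo_order() appear in the patch itself, while oo_make(), oo_objects(), OO_SHIFT and the constants here are hypothetical stand-ins.

/*
 * Sketch: pack a slab's page order and its object count into one
 * machine word so both can be replaced and read together.
 * The 16-bit split and the 4096-byte page size are assumptions for
 * this example, not the exact layout used by the kernel patch.
 */
#include <stdio.h>

#define OO_SHIFT	16
#define OO_MASK		((1UL << OO_SHIFT) - 1)
#define SKETCH_PAGE_SIZE	4096UL	/* assumed page size */

struct kmem_cache_order_objects {
	unsigned long x;	/* order in the high bits, objects in the low bits */
};

/* Build the packed word from an order and an object size. */
static struct kmem_cache_order_objects oo_make(int order, unsigned long size)
{
	struct kmem_cache_order_objects oo = {
		((unsigned long)order << OO_SHIFT) +
			(SKETCH_PAGE_SIZE << order) / size
	};
	return oo;
}

static int oo_order(struct kmem_cache_order_objects oo)
{
	return oo.x >> OO_SHIFT;
}

static unsigned long oo_objects(struct kmem_cache_order_objects oo)
{
	return oo.x & OO_MASK;
}

int main(void)
{
	/* A writer (e.g. an order_store()-like path) installs a new pair
	 * by replacing the whole word at once... */
	struct kmem_cache_order_objects oo = oo_make(3, 256);

	/* ...so a concurrent reader always sees a matching order/objects pair. */
	printf("order=%d objects=%lu\n", oo_order(oo), oo_objects(oo));
	return 0;
}

Because the whole pair lives in a single word, the update and the retrieval are atomic with respect to each other, which is exactly the property the commit message relies on.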
-rw-r--r--	mm/slub.c	29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index de6f38761d1f..23a2683d6c9f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2218,7 +2218,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->objsize;
@@ -2307,7 +2307,10 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	size = ALIGN(size, align);
 	s->size = size;
-	order = calculate_order(size);
+	if (forced_order >= 0)
+		order = forced_order;
+	else
+		order = calculate_order(size);
 
 	if (order < 0)
 		return 0;
@@ -2346,7 +2349,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 
-	if (!calculate_sizes(s))
+	if (!calculate_sizes(s, -1))
 		goto error;
 
 	s->refcount = 1;
@@ -3833,11 +3836,23 @@ static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objs_per_slab);
 
+static ssize_t order_store(struct kmem_cache *s,
+				const char *buf, size_t length)
+{
+	int order = simple_strtoul(buf, NULL, 10);
+
+	if (order > slub_max_order || order < slub_min_order)
+		return -EINVAL;
+
+	calculate_sizes(s, order);
+	return length;
+}
+
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", oo_order(s->oo));
 }
-SLAB_ATTR_RO(order);
+SLAB_ATTR(order);
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
@@ -3971,7 +3986,7 @@ static ssize_t red_zone_store(struct kmem_cache *s,
 	s->flags &= ~SLAB_RED_ZONE;
 	if (buf[0] == '1')
 		s->flags |= SLAB_RED_ZONE;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(red_zone);
@@ -3990,7 +4005,7 @@ static ssize_t poison_store(struct kmem_cache *s,
 	s->flags &= ~SLAB_POISON;
 	if (buf[0] == '1')
 		s->flags |= SLAB_POISON;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(poison);
@@ -4009,7 +4024,7 @@ static ssize_t store_user_store(struct kmem_cache *s,
 	s->flags &= ~SLAB_STORE_USER;
 	if (buf[0] == '1')
 		s->flags |= SLAB_STORE_USER;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(store_user);