author	David Rientjes <rientjes@google.com>	2009-02-22 20:40:09 -0500
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2009-02-23 05:05:46 -0500
commit	73d342b169db700b5a6ad626fe4b86911efec8db (patch)
tree	6ddde0ce99c195eb51e42fae754f8b512e35ce53 /mm/slub.c
parent	3b89d7d881a1dbb4da158f7eb5d6b3ceefc72810 (diff)
slub: add min_partial sysfs tunable
Now that a cache's min_partial has been moved to struct kmem_cache, it's possible to easily tune it from userspace by adding a sysfs attribute.

It may not be desirable to keep a large number of partial slabs around if a cache is used infrequently and memory, especially when constrained by a cgroup, is scarce. It's better to allow userspace to set the minimum policy per cache instead of relying explicitly on kmem_cache_shrink().

The memory savings from simply moving min_partial from struct kmem_cache_node to struct kmem_cache are obviously not significant (unless maybe you're from SGI or something); at the largest it's

	# allocated caches * (MAX_NUMNODES - 1) * sizeof(unsigned long)

The true savings occur when userspace reduces the number of partial slabs that would otherwise be wasted, especially on machines with a large number of nodes (ia64 with CONFIG_NODES_SHIFT at 10 by default?). While the kernel estimates ideal values for n->min_partial and ensures they stay within a sane range, userspace has no input other than writing to /sys/kernel/slab/cache/shrink.

There simply isn't any better heuristic to add when calculating the partial values for an estimate that works for all possible caches. And since min_partial is currently a static value, the user has no way of reclaiming that wasted space, which can be significant when constrained by a cgroup (either cpusets or, later, memory controller slab limits), short of shrinking the cache entirely.

This also allows the user to specify that increased fragmentation and more partial slabs are actually desired, to avoid the cost of allocating new slabs at runtime for specific caches.

There's also no reason why this should be a per-struct kmem_cache_node value in the first place. You could argue that a machine might have such node size asymmetries that it should be specified per node, but nobody is doing that right now: it's a purely static value at the moment, and there's no convenient way to tune it per node via slub's sysfs interface.

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
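To put rough numbers on the upper bound above (illustrative figures, not from the changelog): with CONFIG_NODES_SHIFT=10, MAX_NUMNODES is 1024, so on a 64-bit kernel the move saves at most 1023 * 8 = 8184 bytes, about 8 KB, per allocated cache; a system with roughly 150 caches would save on the order of 1 MB.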
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4fff385b17a3..a3e2d552ff46 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3838,6 +3838,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	calculate_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
@@ -4153,6 +4173,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
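As a usage illustration (not part of the patch), the new attribute is driven like any other slub sysfs file. A minimal userspace sketch in C, assuming a cache named "kmalloc-64" exists and picking 10 as an arbitrary target value:

/*
 * Sketch only: tune min_partial for one cache via the new sysfs
 * attribute.  The cache name "kmalloc-64" and the value 10 are
 * illustrative assumptions, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/kernel/slab/kmalloc-64/min_partial";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* min_partial_store() parses a base-10 unsigned long. */
	fprintf(f, "10\n");
	return fclose(f) ? 1 : 0;
}

The equivalent shell one-liner is: echo 10 > /sys/kernel/slab/kmalloc-64/min_partial. Note that calculate_min_partial(), introduced by the parent commit, clamps the stored value to a sane range (between MIN_PARTIAL and MAX_PARTIAL), so out-of-range writes are adjusted rather than rejected.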