path: root/mm
author	Glauber Costa <glommer@parallels.com>	2012-12-18 17:23:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 18:02:14 -0500
commit	107dab5c92d5f9c3afe962036e47c207363255c7 (patch)
tree	17722e888fc00428ef1614a097bae82d1e9de87a /mm
parent	943a451a87d229ca564a27274b58eaeae35fde5d (diff)
slub: slub-specific propagation changes
SLUB allows us to tune a particular cache's behavior with sysfs-based tunables. When creating a new memcg cache copy, we'd like to preserve any tunables the parent cache already had.

This can be done by tapping into the store attribute function provided by the allocator. We of course don't need to mess with read-only fields. Since the attributes can have multiple types and are stored internally by sysfs, the best strategy is to issue a ->show() on the root cache and then a ->store() on the memcg cache.

The drawback is that sysfs can allocate up to a page of buffering for show(), which we are likely not to need but also can't guarantee. To avoid always allocating a page for that, we update the root cache at store time with the maximum attribute size ever stored to it; we then get a buffer just big enough to hold it. The corollary is that if no stores have happened, nothing is propagated.

It can also happen that a root cache has its tunables updated during normal system operation. In this case, we propagate the change to all memcg caches that are already active.

[akpm@linux-foundation.org: tweak code to avoid __maybe_unused]
Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
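The mechanism above boils down to a text round trip: read the parent's current value with the attribute's ->show(), then feed that string to the child's ->store(), so no per-attribute type knowledge is needed, and remember the largest value ever stored so a suitably sized buffer can be chosen later. The userspace sketch below only models that idea; every name in it (toy_cache, the plain-int min_partial field, propagate()) is invented for illustration and is not the kernel API, although min_partial itself is a real SLUB tunable exposed under /sys/kernel/slab/<cache>/. The real implementation is the memcg_propagate_slab_attrs()/slab_attr_store() code in the diff below.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Toy model of a cache with one text-serializable tunable, exposed
     * through show()/store() callbacks, loosely like a sysfs attribute pair.
     */
    struct toy_cache {
            const char *name;
            int min_partial;        /* the tunable being propagated */
            size_t max_attr_size;   /* largest value ever store()d, in bytes */
    };

    /* show(): serialize the tunable into buf */
    static int show_min_partial(struct toy_cache *c, char *buf, size_t len)
    {
            return snprintf(buf, len, "%d\n", c->min_partial);
    }

    /* store(): parse buf, apply it, and track the largest write seen */
    static int store_min_partial(struct toy_cache *c, const char *buf, size_t len)
    {
            c->min_partial = atoi(buf);
            if (len > c->max_attr_size)
                    c->max_attr_size = len;
            return (int)len;
    }

    /*
     * Propagate the parent's current setting to a child by round-tripping it
     * through the text interface: show() on the parent, store() on the child.
     * If the parent was never written to (max_attr_size == 0), nothing is
     * copied, mirroring the "no stores happened, nothing propagated" rule.
     */
    static void propagate(struct toy_cache *parent, struct toy_cache *child)
    {
            char buf[64];

            if (!parent->max_attr_size)
                    return;
            show_min_partial(parent, buf, sizeof(buf));
            store_min_partial(child, buf, strlen(buf));
    }

    int main(void)
    {
            struct toy_cache root  = { "dentry",        5, 0 };
            struct toy_cache memcg = { "dentry(memcg)", 5, 0 };

            store_min_partial(&root, "12\n", 3);  /* admin tunes the root cache */
            propagate(&root, &memcg);             /* new memcg copy inherits it */

            printf("%s: %d, %s: %d\n", root.name, root.min_partial,
                   memcg.name, memcg.min_partial);
            return 0;
    }

Round-tripping through the textual representation is what lets one generic loop handle attributes of any type, at the cost of the buffer-sizing concern the commit message describes.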
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	76
1 file changed, 75 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index 692177bebdf0..21c94d9695ec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -201,13 +201,14 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
-
+static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 #else
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
 static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
+static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 #endif
 
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -3865,6 +3866,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	if (slab_state <= UP)
 		return 0;
 
+	memcg_propagate_slab_attrs(s);
 	mutex_unlock(&slab_mutex);
 	err = sysfs_slab_add(s);
 	mutex_lock(&slab_mutex);
@@ -5098,10 +5100,82 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 		return -EIO;
 
 	err = attribute->store(s, buf, len);
+#ifdef CONFIG_MEMCG_KMEM
+	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
+		int i;
+
+		mutex_lock(&slab_mutex);
+		if (s->max_attr_size < len)
+			s->max_attr_size = len;
 
+		for_each_memcg_cache_index(i) {
+			struct kmem_cache *c = cache_from_memcg(s, i);
+			/*
+			 * This function's return value is determined by the
+			 * parent cache only
+			 */
+			if (c)
+				attribute->store(c, buf, len);
+		}
+		mutex_unlock(&slab_mutex);
+	}
+#endif
 	return err;
 }
 
+static void memcg_propagate_slab_attrs(struct kmem_cache *s)
+{
+#ifdef CONFIG_MEMCG_KMEM
+	int i;
+	char *buffer = NULL;
+
+	if (!is_root_cache(s))
+		return;
+
+	/*
+	 * This mean this cache had no attribute written. Therefore, no point
+	 * in copying default values around
+	 */
+	if (!s->max_attr_size)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
+		char mbuf[64];
+		char *buf;
+		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+
+		if (!attr || !attr->store || !attr->show)
+			continue;
+
+		/*
+		 * It is really bad that we have to allocate here, so we will
+		 * do it only as a fallback. If we actually allocate, though,
+		 * we can just use the allocated buffer until the end.
+		 *
+		 * Most of the slub attributes will tend to be very small in
+		 * size, but sysfs allows buffers up to a page, so they can
+		 * theoretically happen.
+		 */
+		if (buffer)
+			buf = buffer;
+		else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+			buf = mbuf;
+		else {
+			buffer = (char *) get_zeroed_page(GFP_KERNEL);
+			if (WARN_ON(!buffer))
+				continue;
+			buf = buffer;
+		}
+
+		attr->show(s->memcg_params->root_cache, buf);
+		attr->store(s, buf, strlen(buf));
+	}
+
+	if (buffer)
+		free_page((unsigned long)buffer);
+#endif
+}
+
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,