author     Glauber Costa <glommer@parallels.com>           2012-12-18 17:23:03 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 18:02:14 -0500
commit     943a451a87d229ca564a27274b58eaeae35fde5d (patch)
tree       607a0bc1aba3911602dec1448be2ace190b5c1eb
parent     749c54151a6e5b229e4ae067dbc651e54b161fbc (diff)
slab: propagate tunable values
SLAB allows us to tune a particular cache behavior with tunables.  When
creating a new memcg cache copy, we'd like to preserve any tunables the
parent cache already had.

This could be done by an explicit call to do_tune_cpucache() after the
cache is created.  But this is not very convenient now that the caches are
created from common code, since this function is SLAB-specific.

Another method of doing that is taking advantage of the fact that
do_tune_cpucache() is always called from enable_cpucache(), which is
called at cache initialization.  We can just preset the values, and then
things work as expected.

It can also happen that a root cache has its tunables updated during
normal system operation.  In this case, we will propagate the change to
all caches that are already active.

This change will require us to move the assignment of root_cache in
memcg_params a bit earlier.  We need this to be already set - which
memcg_kmem_register_cache will do - when we reach __kmem_cache_create()

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
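In short: a memcg copy starts out with its root cache's limit/batchcount/shared
values, and a later retune of the root cache is re-applied to every active copy
(the usual runtime trigger being a write of "name limit batchcount shared" to
/proc/slabinfo, which for SLAB ends up in do_tune_cpucache()).  The standalone
sketch below models that flow outside the kernel; the struct cache type,
apply_tunables(), tune_cache() and init_child() are hypothetical stand-ins, not
the kernel's data structures or functions.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct kmem_cache and its per-memcg copies. */
#define MAX_MEMCG_CACHES 4

struct cache {
	const char *name;
	int limit, batchcount, shared;			/* the SLAB tunables */
	struct cache *root;				/* NULL for a root cache */
	struct cache *children[MAX_MEMCG_CACHES];	/* active memcg copies */
};

/* Apply one set of tunables to one cache (plays the role of __do_tune_cpucache()). */
static void apply_tunables(struct cache *c, int limit, int batchcount, int shared)
{
	c->limit = limit;
	c->batchcount = batchcount;
	c->shared = shared;
}

/*
 * Tune a root cache and propagate the same values to every active memcg
 * copy (plays the role of the new do_tune_cpucache() wrapper).
 */
static void tune_cache(struct cache *root, int limit, int batchcount, int shared)
{
	apply_tunables(root, limit, batchcount, shared);
	for (size_t i = 0; i < MAX_MEMCG_CACHES; i++)
		if (root->children[i])
			apply_tunables(root->children[i], limit, batchcount, shared);
}

/*
 * A newly created memcg copy presets its tunables from its root cache
 * (what enable_cpucache() now does through memcg_root_cache()).
 */
static void init_child(struct cache *child, struct cache *root, size_t idx)
{
	child->root = root;
	root->children[idx] = child;
	apply_tunables(child, root->limit, root->batchcount, root->shared);
}

int main(void)
{
	struct cache root  = { .name = "dentry", .limit = 120, .batchcount = 60, .shared = 8 };
	struct cache child = { .name = "dentry(2:foo)" };

	init_child(&child, &root, 0);	/* child inherits 120/60/8 from the root */
	tune_cache(&root, 256, 128, 8);	/* both caches now report 256/128/8 */

	printf("%s: limit=%d batchcount=%d shared=%d\n",
	       child.name, child.limit, child.batchcount, child.shared);
	return 0;
}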
-rw-r--r--  include/linux/memcontrol.h   8
-rw-r--r--  include/linux/slab.h          2
-rw-r--r--  mm/memcontrol.c              10
-rw-r--r--  mm/slab.c                    44
-rw-r--r--  mm/slab.h                    12
-rw-r--r--  mm/slab_common.c              7
6 files changed, 69 insertions, 14 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8dc7c746b44f..ea02ff970836 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -448,7 +448,8 @@ void __memcg_kmem_commit_charge(struct page *page,
 void __memcg_kmem_uncharge_pages(struct page *page, int order);
 
 int memcg_cache_id(struct mem_cgroup *memcg);
-int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s);
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+			 struct kmem_cache *root_cache);
 void memcg_release_cache(struct kmem_cache *cachep);
 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
 
@@ -590,8 +591,9 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return -1;
 }
 
-static inline int memcg_register_cache(struct mem_cgroup *memcg,
-					struct kmem_cache *s)
+static inline int
+memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+		     struct kmem_cache *root_cache)
 {
 	return 0;
 }
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b9278663f22a..5d168d7e0a28 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -130,7 +130,7 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			void (*)(void *));
 struct kmem_cache *
 kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
-			unsigned long, void (*)(void *));
+			unsigned long, void (*)(void *), struct kmem_cache *);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a32d83c2e353..f3009b4bae51 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3012,7 +3012,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 	return 0;
 }
 
-int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s)
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+			 struct kmem_cache *root_cache)
 {
 	size_t size = sizeof(struct memcg_cache_params);
 
@@ -3026,8 +3027,10 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s)
 	if (!s->memcg_params)
 		return -ENOMEM;
 
-	if (memcg)
+	if (memcg) {
 		s->memcg_params->memcg = memcg;
+		s->memcg_params->root_cache = root_cache;
+	}
 	return 0;
 }
 
@@ -3186,7 +3189,7 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 		return NULL;
 
 	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
-				      (s->flags & ~SLAB_PANIC), s->ctor);
+				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
@@ -3226,7 +3229,6 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	}
 
 	mem_cgroup_get(memcg);
-	new_cachep->memcg_params->root_cache = cachep;
 	atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
 	cachep->memcg_params->memcg_caches[idx] = new_cachep;
diff --git a/mm/slab.c b/mm/slab.c
index 7467343f9fe7..4dcbf96a77b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4041,7 +4041,7 @@ static void do_ccupdate_local(void *info)
 }
 
 /* Always called with the slab_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
@@ -4084,12 +4084,48 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	return alloc_kmemlist(cachep, gfp);
 }
 
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+				int batchcount, int shared, gfp_t gfp)
+{
+	int ret;
+	struct kmem_cache *c = NULL;
+	int i = 0;
+
+	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
+
+	if (slab_state < FULL)
+		return ret;
+
+	if ((ret < 0) || !is_root_cache(cachep))
+		return ret;
+
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg(cachep, i);
+		if (c)
+			/* return value determined by the parent cache only */
+			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
+	}
+
+	return ret;
+}
+
 /* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
-	int limit, shared;
+	int limit = 0;
+	int shared = 0;
+	int batchcount = 0;
+
+	if (!is_root_cache(cachep)) {
+		struct kmem_cache *root = memcg_root_cache(cachep);
+		limit = root->limit;
+		shared = root->shared;
+		batchcount = root->batchcount;
+	}
 
+	if (limit && shared && batchcount)
+		goto skip_setup;
 	/*
 	 * The head array serves three purposes:
 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
@@ -4131,7 +4167,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
+	batchcount = (limit + 1) / 2;
+skip_setup:
+	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
diff --git a/mm/slab.h b/mm/slab.h
index ec5dae1c8e75..34a98d642196 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -155,6 +155,13 @@ static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
 	return s->memcg_params->memcg_caches[idx];
 }
+
+static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+{
+	if (is_root_cache(s))
+		return s;
+	return s->memcg_params->root_cache;
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -190,6 +197,11 @@ static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
 	return NULL;
 }
+
+static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+{
+	return s;
+}
 #endif
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 081f1b8d9a7b..3f3cd97d3fdf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -164,7 +164,8 @@ unsigned long calculate_alignment(unsigned long flags,
 
 struct kmem_cache *
 kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
-			size_t align, unsigned long flags, void (*ctor)(void *))
+			size_t align, unsigned long flags, void (*ctor)(void *),
+			struct kmem_cache *parent_cache)
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
@@ -193,7 +194,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 		s->align = calculate_alignment(flags, align, size);
 		s->ctor = ctor;
 
-		if (memcg_register_cache(memcg, s)) {
+		if (memcg_register_cache(memcg, s, parent_cache)) {
 			kmem_cache_free(kmem_cache, s);
 			err = -ENOMEM;
 			goto out_locked;
@@ -243,7 +244,7 @@ struct kmem_cache *
 kmem_cache_create(const char *name, size_t size, size_t align,
 		unsigned long flags, void (*ctor)(void *))
 {
-	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
+	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
 }
 EXPORT_SYMBOL(kmem_cache_create);
 