aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2014-01-23 18:53:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-23 19:36:51 -0500
commitf8570263ee16eb1d5038b8e20d7db3a68bbb2b49 (patch)
tree06d54d7f5f357622604d1e8338478b906729409d /mm
parentf717eb3abb5ea38f60e671dbfdbf512c2c93d22e (diff)
memcg, slab: RCU protect memcg_params for root caches
We relocate root cache's memcg_params whenever we need to grow the memcg_caches array to accommodate all kmem-active memory cgroups. Currently on relocation we free the old version immediately, which can lead to use-after-free, because the memcg_caches array is accessed lock-free (see cache_from_memcg_idx()). This patch fixes this by making memcg_params RCU-protected for root caches.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c15
-rw-r--r--mm/slab.h16
2 files changed, 23 insertions, 8 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 80197e544764..216659d4441a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3178,18 +3178,17 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3178 3178
3179 if (num_groups > memcg_limited_groups_array_size) { 3179 if (num_groups > memcg_limited_groups_array_size) {
3180 int i; 3180 int i;
3181 struct memcg_cache_params *new_params;
3181 ssize_t size = memcg_caches_array_size(num_groups); 3182 ssize_t size = memcg_caches_array_size(num_groups);
3182 3183
3183 size *= sizeof(void *); 3184 size *= sizeof(void *);
3184 size += offsetof(struct memcg_cache_params, memcg_caches); 3185 size += offsetof(struct memcg_cache_params, memcg_caches);
3185 3186
3186 s->memcg_params = kzalloc(size, GFP_KERNEL); 3187 new_params = kzalloc(size, GFP_KERNEL);
3187 if (!s->memcg_params) { 3188 if (!new_params)
3188 s->memcg_params = cur_params;
3189 return -ENOMEM; 3189 return -ENOMEM;
3190 }
3191 3190
3192 s->memcg_params->is_root_cache = true; 3191 new_params->is_root_cache = true;
3193 3192
3194 /* 3193 /*
3195 * There is the chance it will be bigger than 3194 * There is the chance it will be bigger than
@@ -3203,7 +3202,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3203 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3202 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3204 if (!cur_params->memcg_caches[i]) 3203 if (!cur_params->memcg_caches[i])
3205 continue; 3204 continue;
3206 s->memcg_params->memcg_caches[i] = 3205 new_params->memcg_caches[i] =
3207 cur_params->memcg_caches[i]; 3206 cur_params->memcg_caches[i];
3208 } 3207 }
3209 3208
@@ -3216,7 +3215,9 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3216 * bigger than the others. And all updates will reset this 3215 * bigger than the others. And all updates will reset this
3217 * anyway. 3216 * anyway.
3218 */ 3217 */
3219 kfree(cur_params); 3218 rcu_assign_pointer(s->memcg_params, new_params);
3219 if (cur_params)
3220 kfree_rcu(cur_params, rcu_head);
3220 } 3221 }
3221 return 0; 3222 return 0;
3222} 3223}
diff --git a/mm/slab.h b/mm/slab.h
index 72d1f9df71bd..8184a7cde272 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -160,14 +160,28 @@ static inline const char *cache_name(struct kmem_cache *s)
160 return s->name; 160 return s->name;
161} 161}
162 162
163/*
164 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
165 * That said the caller must assure the memcg's cache won't go away. Since once
166 * created a memcg's cache is destroyed only along with the root cache, it is
167 * true if we are going to allocate from the cache or hold a reference to the
168 * root cache by other means. Otherwise, we should hold either the slab_mutex
169 * or the memcg's slab_caches_mutex while calling this function and accessing
170 * the returned value.
171 */
163static inline struct kmem_cache * 172static inline struct kmem_cache *
164cache_from_memcg_idx(struct kmem_cache *s, int idx) 173cache_from_memcg_idx(struct kmem_cache *s, int idx)
165{ 174{
166 struct kmem_cache *cachep; 175 struct kmem_cache *cachep;
176 struct memcg_cache_params *params;
167 177
168 if (!s->memcg_params) 178 if (!s->memcg_params)
169 return NULL; 179 return NULL;
170 cachep = s->memcg_params->memcg_caches[idx]; 180
181 rcu_read_lock();
182 params = rcu_dereference(s->memcg_params);
183 cachep = params->memcg_caches[idx];
184 rcu_read_unlock();
171 185
172 /* 186 /*
173 * Make sure we will access the up-to-date value. The code updating 187 * Make sure we will access the up-to-date value. The code updating