about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2014-01-23 18:52:59 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-23 19:36:51 -0500
commit959c8963fc6c8c9b97e80c55ce77105247040e7d (patch)
tree4f2bcf3e4a69f4a655fabf0fc178498f3d0c367f /mm
parent1aa13254259bdef0bca723849ab3bab308d2f0c3 (diff)
memcg, slab: fix barrier usage when accessing memcg_caches
Each root kmem_cache has pointers to per-memcg caches stored in its memcg_params::memcg_caches array. Whenever we want to allocate a slab for a memcg, we access this array to get per-memcg cache to allocate from (see memcg_kmem_get_cache()). The access must be lock-free for performance reasons, so we should use barriers to assert the kmem_cache is up-to-date. First, we should place a write barrier immediately before setting the pointer to it in the memcg_caches array in order to make sure nobody will see a partially initialized object. Second, we should issue a read barrier before dereferencing the pointer to conform to the write barrier. However, currently the barrier usage looks rather strange. We have a write barrier *after* setting the pointer and a read barrier *before* reading the pointer, which is incorrect. This patch fixes this. Signed-off-by: Vladimir Davydov <vdavydov@parallels.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Glauber Costa <glommer@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Balbir Singh <bsingharora@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Christoph Lameter <cl@linux.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  | 24
-rw-r--r--  mm/slab.h        | 12
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 739383cd3f70..322d18dc17f0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3274,12 +3274,14 @@ void memcg_register_cache(struct kmem_cache *s)
3274 list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); 3274 list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
3275 mutex_unlock(&memcg->slab_caches_mutex); 3275 mutex_unlock(&memcg->slab_caches_mutex);
3276 3276
3277 root->memcg_params->memcg_caches[id] = s;
3278 /* 3277 /*
3279 * the readers won't lock, make sure everybody sees the updated value, 3278 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3280 * so they won't put stuff in the queue again for no reason 3279 * barrier here to ensure nobody will see the kmem_cache partially
3280 * initialized.
3281 */ 3281 */
3282 wmb(); 3282 smp_wmb();
3283
3284 root->memcg_params->memcg_caches[id] = s;
3283} 3285}
3284 3286
3285void memcg_unregister_cache(struct kmem_cache *s) 3287void memcg_unregister_cache(struct kmem_cache *s)
@@ -3605,7 +3607,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3605 gfp_t gfp) 3607 gfp_t gfp)
3606{ 3608{
3607 struct mem_cgroup *memcg; 3609 struct mem_cgroup *memcg;
3608 int idx; 3610 struct kmem_cache *memcg_cachep;
3609 3611
3610 VM_BUG_ON(!cachep->memcg_params); 3612 VM_BUG_ON(!cachep->memcg_params);
3611 VM_BUG_ON(!cachep->memcg_params->is_root_cache); 3613 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
@@ -3619,15 +3621,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3619 if (!memcg_can_account_kmem(memcg)) 3621 if (!memcg_can_account_kmem(memcg))
3620 goto out; 3622 goto out;
3621 3623
3622 idx = memcg_cache_id(memcg); 3624 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3623 3625 if (likely(memcg_cachep)) {
3624 /* 3626 cachep = memcg_cachep;
3625 * barrier to mare sure we're always seeing the up to date value. The
3626 * code updating memcg_caches will issue a write barrier to match this.
3627 */
3628 read_barrier_depends();
3629 if (likely(cache_from_memcg_idx(cachep, idx))) {
3630 cachep = cache_from_memcg_idx(cachep, idx);
3631 goto out; 3627 goto out;
3632 } 3628 }
3633 3629
diff --git a/mm/slab.h b/mm/slab.h
index 0859c4241ba1..72d1f9df71bd 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -163,9 +163,19 @@ static inline const char *cache_name(struct kmem_cache *s)
163static inline struct kmem_cache * 163static inline struct kmem_cache *
164cache_from_memcg_idx(struct kmem_cache *s, int idx) 164cache_from_memcg_idx(struct kmem_cache *s, int idx)
165{ 165{
166 struct kmem_cache *cachep;
167
166 if (!s->memcg_params) 168 if (!s->memcg_params)
167 return NULL; 169 return NULL;
168 return s->memcg_params->memcg_caches[idx]; 170 cachep = s->memcg_params->memcg_caches[idx];
171
172 /*
173 * Make sure we will access the up-to-date value. The code updating
174 * memcg_caches issues a write barrier to match this (see
175 * memcg_register_cache()).
176 */
177 smp_read_barrier_depends();
178 return cachep;
169} 179}
170 180
171static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) 181static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)