 include/linux/slab.h |  9 +++++++--
 mm/memcontrol.c      | 15 ++++++++-------
 mm/slab.h            | 16 +++++++++++++++-
 3 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1e2f4fe12773..a060142aa5f5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  *
  * Both the root cache and the child caches will have it. For the root cache,
  * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system.
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
  *
  * Child caches will hold extra metadata needed for its operation. Fields are:
  *
@@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 struct memcg_cache_params {
 	bool is_root_cache;
 	union {
-		struct kmem_cache *memcg_caches[0];
+		struct {
+			struct rcu_head rcu_head;
+			struct kmem_cache *memcg_caches[0];
+		};
 		struct {
 			struct mem_cgroup *memcg;
 			struct list_head list;
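
The rcu_head goes into a new anonymous struct inside the union: only root
caches use the memcg_caches array (and hence the deferred free), while only
child caches use the memcg/list fields, so the two layouts never coexist in
one object. The cost is that memcg_caches no longer sits at offset zero in the
union; the existing offsetof() sizing in memcg_update_cache_size() below picks
this up automatically. A minimal userspace sketch of the same layout trick,
with a hypothetical two-pointer stand-in for the kernel's struct rcu_head
(kernel headers are not usable here), that compiles with gcc and prints the
resulting offsets:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's struct rcu_head (assumed: two pointers wide). */
struct rcu_head_sketch {
	void *next;
	void (*func)(struct rcu_head_sketch *head);
};

struct memcg_cache_params_sketch {
	int is_root_cache;			/* bool in the kernel */
	union {
		struct {			/* root cache */
			struct rcu_head_sketch rcu_head;
			void *memcg_caches[0];	/* GNU zero-length array, as in the kernel */
		};
		struct {			/* child cache */
			void *memcg;
			void *list_next, *list_prev;
		};
	};
};

int main(void)
{
	/* memcg_caches is pushed back by sizeof(rcu_head), so allocations
	 * must be sized from offsetof(..., memcg_caches), not 0. */
	printf("offsetof(rcu_head)     = %zu\n",
	       offsetof(struct memcg_cache_params_sketch, rcu_head));
	printf("offsetof(memcg_caches) = %zu\n",
	       offsetof(struct memcg_cache_params_sketch, memcg_caches));
	return 0;
}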
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 80197e544764..216659d4441a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3178,18 +3178,17 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 
 	if (num_groups > memcg_limited_groups_array_size) {
 		int i;
+		struct memcg_cache_params *new_params;
 		ssize_t size = memcg_caches_array_size(num_groups);
 
 		size *= sizeof(void *);
 		size += offsetof(struct memcg_cache_params, memcg_caches);
 
-		s->memcg_params = kzalloc(size, GFP_KERNEL);
-		if (!s->memcg_params) {
-			s->memcg_params = cur_params;
+		new_params = kzalloc(size, GFP_KERNEL);
+		if (!new_params)
 			return -ENOMEM;
-		}
 
-		s->memcg_params->is_root_cache = true;
+		new_params->is_root_cache = true;
 
 		/*
 		 * There is the chance it will be bigger than
@@ -3203,7 +3202,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 		for (i = 0; i < memcg_limited_groups_array_size; i++) {
 			if (!cur_params->memcg_caches[i])
 				continue;
-			s->memcg_params->memcg_caches[i] =
+			new_params->memcg_caches[i] =
 				cur_params->memcg_caches[i];
 		}
 
@@ -3216,7 +3215,9 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 		 * bigger than the others. And all updates will reset this
 		 * anyway.
 		 */
-		kfree(cur_params);
+		rcu_assign_pointer(s->memcg_params, new_params);
+		if (cur_params)
+			kfree_rcu(cur_params, rcu_head);
 	}
 	return 0;
 }
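
Together these hunks follow the standard RCU update pattern for a resizable
array: build the new version off to the side, publish it with
rcu_assign_pointer() so readers observe either the old or the new array but
never a half-copied one, and hand the old version to kfree_rcu() so it is
freed only after a grace period, once pre-existing readers are done with it.
A minimal userspace sketch of the publish step, assuming C11 atomics as
stand-ins (a release store for rcu_assign_pointer(); the grace-period wait is
only stubbed out, since deferred reclamation is exactly what real RCU
provides):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct params {
	size_t size;		/* number of slots in caches[] */
	void *caches[];		/* grows on resize, like memcg_caches */
};

static _Atomic(struct params *) current_params;

/* Stand-in for kfree_rcu(): real RCU defers the free until all readers
 * that might still hold the old pointer have left their read sections. */
static void free_after_grace_period(struct params *old)
{
	/* ... wait for a grace period here ... */
	free(old);
}

static int resize_params(size_t new_size)
{
	struct params *cur = atomic_load(&current_params);
	struct params *new_p;

	new_p = calloc(1, sizeof(*new_p) + new_size * sizeof(void *));
	if (!new_p)
		return -1;
	new_p->size = new_size;

	/* Copy existing entries, like the loop over memcg_caches[] above. */
	if (cur)
		memcpy(new_p->caches, cur->caches, cur->size * sizeof(void *));

	/* Publish. The release store plays the role of rcu_assign_pointer():
	 * the copied contents become visible before the new pointer does. */
	atomic_store_explicit(&current_params, new_p, memory_order_release);

	if (cur)
		free_after_grace_period(cur);
	return 0;
}

int main(void)
{
	resize_params(4);
	resize_params(8);	/* the 4-slot array is freed after a "grace period" */
	printf("size = %zu\n", atomic_load(&current_params)->size);
	return 0;
}

The reader side of the same pattern is what the mm/slab.h hunk below adds:
rcu_read_lock(), an rcu_dereference() of s->memcg_params (an acquire/consume
load in the sketch's terms), the array access, then rcu_read_unlock().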
diff --git a/mm/slab.h b/mm/slab.h
index 72d1f9df71bd..8184a7cde272 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -160,14 +160,28 @@ static inline const char *cache_name(struct kmem_cache *s)
 	return s->name;
 }
 
+/*
+ * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
+ * That said the caller must assure the memcg's cache won't go away. Since once
+ * created a memcg's cache is destroyed only along with the root cache, it is
+ * true if we are going to allocate from the cache or hold a reference to the
+ * root cache by other means. Otherwise, we should hold either the slab_mutex
+ * or the memcg's slab_caches_mutex while calling this function and accessing
+ * the returned value.
+ */
 static inline struct kmem_cache *
 cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	struct kmem_cache *cachep;
+	struct memcg_cache_params *params;
 
 	if (!s->memcg_params)
 		return NULL;
-	cachep = s->memcg_params->memcg_caches[idx];
+
+	rcu_read_lock();
+	params = rcu_dereference(s->memcg_params);
+	cachep = params->memcg_caches[idx];
+	rcu_read_unlock();
 
 	/*
 	 * Make sure we will access the up-to-date value. The code updating