Diffstat (limited to 'mm/slab_common.c')

 mm/slab_common.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 54 insertions(+), 36 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0b7bb399b0e4..8e40321da091 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -171,13 +171,26 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 			struct kmem_cache *parent_cache)
 {
 	struct kmem_cache *s = NULL;
-	int err = 0;
+	int err;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
-		goto out_locked;
+	err = kmem_cache_sanity_check(memcg, name, size);
+	if (err)
+		goto out_unlock;
+
+	if (memcg) {
+		/*
+		 * Since per-memcg caches are created asynchronously on first
+		 * allocation (see memcg_kmem_get_cache()), several threads can
+		 * try to create the same cache, but only one of them may
+		 * succeed. Therefore if we get here and see the cache has
+		 * already been created, we silently return NULL.
+		 */
+		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
+			goto out_unlock;
+	}
 
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
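
The memcg branch added above is the point of this hunk: per-memcg caches are created asynchronously on first allocation, so several threads can race to create the same cache, and a loser must detect that under slab_mutex and back off silently. As a standalone illustration only, here is a minimal userspace sketch of that re-check-under-the-lock idiom; the names (create_cache_once, cache_slot) are hypothetical, not kernel APIs:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *cache_slot;	/* non-NULL once the cache exists */

/*
 * Returns the newly created cache, or NULL if another thread already
 * created it -- mirroring the patch, which silently returns NULL so
 * the caller keeps using its fallback cache until the next attempt.
 */
void *create_cache_once(void *(*create)(void))
{
	void *c = NULL;

	pthread_mutex_lock(&cache_mutex);
	if (!cache_slot)		/* re-check under the lock */
		cache_slot = c = create();
	pthread_mutex_unlock(&cache_mutex);
	return c;
}
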
@@ -189,45 +202,45 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
 	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 	if (s)
-		goto out_locked;
+		goto out_unlock;
 
+	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-	if (s) {
-		s->object_size = s->size = size;
-		s->align = calculate_alignment(flags, align, size);
-		s->ctor = ctor;
+	if (!s)
+		goto out_unlock;
 
-		if (memcg_register_cache(memcg, s, parent_cache)) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->object_size = s->size = size;
+	s->align = calculate_alignment(flags, align, size);
+	s->ctor = ctor;
 
-		s->name = kstrdup(name, GFP_KERNEL);
-		if (!s->name) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->name = kstrdup(name, GFP_KERNEL);
+	if (!s->name)
+		goto out_free_cache;
 
-		err = __kmem_cache_create(s, flags);
-		if (!err) {
-			s->refcount = 1;
-			list_add(&s->list, &slab_caches);
-			memcg_cache_list_add(memcg, s);
-		} else {
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
-		}
-	} else
-		err = -ENOMEM;
+	err = memcg_alloc_cache_params(memcg, s, parent_cache);
+	if (err)
+		goto out_free_cache;
+
+	err = __kmem_cache_create(s, flags);
+	if (err)
+		goto out_free_cache;
 
-out_locked:
+	s->refcount = 1;
+	list_add(&s->list, &slab_caches);
+	memcg_register_cache(s);
+
+out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-	if (err) {
-
+	/*
+	 * There is no point in flooding logs with warnings or especially
+	 * crashing the system if we fail to create a cache for a memcg. In
+	 * this case we will be accounting the memcg allocation to the root
+	 * cgroup until we succeed to create its own cache, but it isn't that
+	 * critical.
+	 */
+	if (err && !memcg) {
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
@@ -236,11 +249,15 @@ out_locked:
 				name, err);
 			dump_stack();
 		}
-
 		return NULL;
 	}
-
 	return s;
+
+out_free_cache:
+	memcg_free_cache_params(s);
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+	goto out_unlock;
 }
 
 struct kmem_cache *
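
Taken together, the two hunks above replace the old nested if/else error handling with the kernel's usual goto-based unwind: err is preset to -ENOMEM, each acquisition either succeeds or jumps to a label that releases exactly what has been taken so far, and every path leaves through out_unlock. A minimal compilable sketch of the same pattern, under hypothetical names (make_widget, struct widget) not tied to the slab code:

#include <stdlib.h>
#include <string.h>

struct widget {
	char *name;
	void *buf;
};

struct widget *make_widget(const char *name)
{
	struct widget *w;

	w = calloc(1, sizeof(*w));
	if (!w)
		goto out;

	w->name = strdup(name);
	if (!w->name)
		goto out_free_widget;

	w->buf = malloc(4096);
	if (!w->buf)
		goto out_free_name;

	return w;	/* success: caller owns everything */

out_free_name:
	free(w->name);
out_free_widget:
	free(w);
out:
	return NULL;
}

The unwind labels run in reverse order of acquisition, which is exactly what the new out_free_cache label does for the cache's memcg params, name, and the kmem_cache structure itself.
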
@@ -263,11 +280,12 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (!__kmem_cache_shutdown(s)) {
+		memcg_unregister_cache(s);
 		mutex_unlock(&slab_mutex);
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
-		memcg_release_cache(s);
+		memcg_free_cache_params(s);
 		kfree(s->name);
 		kmem_cache_free(kmem_cache, s);
 	} else {
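
The destroy path is the mirror image: the cache is unpublished from the memcg bookkeeping while slab_mutex is still held, so concurrent creators cannot observe a half-destroyed cache, and its memory is freed only after the RCU barrier has run. A sketch of that unpublish-then-free ordering, again with hypothetical names (destroy_cache, cache_list) and a comment standing in for rcu_barrier():

#include <pthread.h>
#include <stdlib.h>

struct cache {
	struct cache *next;
	char *name;
};

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct cache *cache_list;

void destroy_cache(struct cache *c)
{
	struct cache **p;

	pthread_mutex_lock(&cache_mutex);
	for (p = &cache_list; *p; p = &(*p)->next)
		if (*p == c) {
			*p = c->next;	/* unpublish under the lock */
			break;
		}
	pthread_mutex_unlock(&cache_mutex);

	/* the patch waits here (rcu_barrier()) before freeing */
	free(c->name);
	free(c);
}
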