author		Vladimir Davydov <vdavydov@parallels.com>	2014-01-23 18:52:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit		3965fc3652244651006ebb31c8c45318ce84818f (patch)
tree		6ab0e736387b7694d874809ff4b8e9b463461f28 /mm
parent		309381feaee564281c3d9e90fbca8963bb7428ad (diff)
slab: clean up kmem_cache_create_memcg() error handling
Currently kmem_cache_create_memcg() backs off on failure inside conditionals, without using gotos. This results in duplicated rollback code, which makes the function look cumbersome even though on error we only need to free the allocated cache. Since in the next patch I am going to add yet another rollback call on the error path, let's employ labels instead of conditionals for undoing any changes on failure, to keep things clean.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
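For reference, a minimal, self-contained sketch (not kernel code) of the goto-based unwinding pattern the patch switches to: every failure jumps to a label that frees only what has been set up so far, so the rollback code exists exactly once. All names here (make_widget, struct widget) are invented for illustration.

	#include <stdlib.h>
	#include <string.h>

	struct widget {
		char *name;
		int *data;
	};

	static struct widget *make_widget(const char *name, size_t n)
	{
		struct widget *w;

		w = calloc(1, sizeof(*w));
		if (!w)
			goto out;		/* nothing to undo yet */

		w->name = strdup(name);
		if (!w->name)
			goto out_free_widget;	/* undo the calloc only */

		w->data = calloc(n, sizeof(*w->data));
		if (!w->data)
			goto out_free_name;	/* undo strdup, then calloc */

		return w;			/* success path falls straight through */

	out_free_name:
		free(w->name);
	out_free_widget:
		free(w);
	out:
		return NULL;
	}

	int main(void)
	{
		struct widget *w = make_widget("example", 16);

		if (w) {
			free(w->data);
			free(w->name);
			free(w);
		}
		return 0;
	}

The point of the pattern, as in the patch below, is that adding one more setup step later only requires one new label, not another copy of the teardown code.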
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab_common.c	65
1 file changed, 31 insertions(+), 34 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0b7bb399b0e4..f70df3ef6f1a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -171,13 +171,14 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 			  struct kmem_cache *parent_cache)
 {
 	struct kmem_cache *s = NULL;
-	int err = 0;
+	int err;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
-		goto out_locked;
+	err = kmem_cache_sanity_check(memcg, name, size);
+	if (err)
+		goto out_unlock;
 
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
@@ -189,45 +190,38 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
 	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 	if (s)
-		goto out_locked;
+		goto out_unlock;
 
+	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-	if (s) {
-		s->object_size = s->size = size;
-		s->align = calculate_alignment(flags, align, size);
-		s->ctor = ctor;
+	if (!s)
+		goto out_unlock;
 
-		if (memcg_register_cache(memcg, s, parent_cache)) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->object_size = s->size = size;
+	s->align = calculate_alignment(flags, align, size);
+	s->ctor = ctor;
 
-		s->name = kstrdup(name, GFP_KERNEL);
-		if (!s->name) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->name = kstrdup(name, GFP_KERNEL);
+	if (!s->name)
+		goto out_free_cache;
 
-		err = __kmem_cache_create(s, flags);
-		if (!err) {
-			s->refcount = 1;
-			list_add(&s->list, &slab_caches);
-			memcg_cache_list_add(memcg, s);
-		} else {
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
-		}
-	} else
-		err = -ENOMEM;
+	err = memcg_register_cache(memcg, s, parent_cache);
+	if (err)
+		goto out_free_cache;
 
-out_locked:
+	err = __kmem_cache_create(s, flags);
+	if (err)
+		goto out_free_cache;
+
+	s->refcount = 1;
+	list_add(&s->list, &slab_caches);
+	memcg_cache_list_add(memcg, s);
+
+out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
 	if (err) {
-
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
@@ -236,11 +230,14 @@ out_locked:
 			name, err);
 		dump_stack();
 	}
-
 		return NULL;
 	}
-
 	return s;
+
+out_free_cache:
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+	goto out_unlock;
 }
 
 struct kmem_cache *