path: root/kernel/user.c
author	Pavel Emelyanov <xemul@openvz.org>	2008-01-25 15:08:26 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:26 -0500
commit	8eb703e4f33488bf75829564d51d427e17f7cd4c
tree	ee520bf5f1b6c5b61f3b2b6b950ecb4108fc22fc /kernel/user.c
parent	dc938520d2bf343b239795cfa24e4f44649358dc
uids: merge multiple error paths in alloc_uid() into one
There are already 4 error paths in alloc_uid() that do incremental rollbacks. I think it's time to merge them. This costs us 8 lines of code :)

Maybe it would be better to merge this patch with the previous one, but I remember that some time ago I sent a similar patch (fixing the error path and cleaning it), and I was told to make two patches in such cases.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
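The pattern this patch introduces is the kernel's usual centralized-error-path idiom: acquire resources in order, and on any failure jump to a label that unwinds only what was already acquired, so the rollback sequence is written once instead of being duplicated at every failure point. A minimal userspace sketch of the idiom follows; the acquire()/release() helpers are hypothetical stand-ins for the uid cache entry, keyrings and scheduler state handled by alloc_uid(), not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource standing in for a kernel object. */
struct resource { int id; };

static struct resource *acquire(int id)
{
	struct resource *r = malloc(sizeof(*r));
	if (r)
		r->id = id;
	return r;
}

static void release(struct resource *r)
{
	free(r);
}

/* Acquire three resources in order; on failure, jump to the label
 * that releases exactly the resources acquired so far. */
static int setup(void)
{
	struct resource *a, *b, *c;

	a = acquire(1);
	if (!a)
		goto out;

	b = acquire(2);
	if (!b)
		goto out_free_a;

	c = acquire(3);
	if (!c)
		goto out_free_b;

	printf("all resources acquired\n");
	release(c);
	release(b);
	release(a);
	return 0;

out_free_b:
	release(b);
out_free_a:
	release(a);
out:
	return -1;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The labels appear in reverse order of acquisition and each undoes exactly one step, so a failure at step N falls through all the cleanups for steps N-1 down to 1; this is the same structure alloc_uid() gains below with out_destoy_sched, out_put_keys, out_free_user and out_unlock.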
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 47
1 file changed, 20 insertions(+), 27 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index ab4fd706993b..bc1c48d35cb3 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -319,7 +319,7 @@ void free_uid(struct user_struct *up)
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
 	struct hlist_head *hashent = uidhashentry(ns, uid);
-	struct user_struct *up;
+	struct user_struct *up, *new;
 
 	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
 	 * atomic.
@@ -331,13 +331,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new) {
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (!new)
+			goto out_unlock;
 
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
@@ -353,28 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 #endif
 		new->locked_shm = 0;
 
-		if (alloc_uid_keyring(new, current) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (alloc_uid_keyring(new, current) < 0)
+			goto out_free_user;
 
-		if (sched_create_user(new) < 0) {
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (sched_create_user(new) < 0)
+			goto out_put_keys;
 
-		if (uids_user_create(new)) {
-			sched_destroy_user(new);
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (uids_user_create(new))
+			goto out_destoy_sched;
 
 		/*
 		 * Before adding this, check whether we raced
@@ -402,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	uids_mutex_unlock();
 
 	return up;
+
+out_destoy_sched:
+	sched_destroy_user(new);
+out_put_keys:
+	key_put(new->uid_keyring);
+	key_put(new->session_keyring);
+out_free_user:
+	kmem_cache_free(uid_cachep, new);
+out_unlock:
+	uids_mutex_unlock();
+	return NULL;
 }
 
 void switch_uid(struct user_struct *new_user)