Diffstat (limited to 'kernel/user.c')

 kernel/user.c | 47 +++++++++++++----------------------------------
 1 file changed, 13 insertions(+), 34 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index d476307dd4b0..c0ef3a464438 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -20,9 +20,9 @@
 
 struct user_namespace init_user_ns = {
 	.kref = {
-		.refcount	= ATOMIC_INIT(2),
+		.refcount	= ATOMIC_INIT(1),
 	},
-	.root_user = &root_user,
+	.creator = &root_user,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
@@ -48,12 +48,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= ATOMIC_INIT(2),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
+	.user_ns	= &init_user_ns,
 #ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
@@ -314,12 +316,13 @@ done:
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	/* restore back the count */
 	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 
+	put_user_ns(up->user_ns);
 	INIT_WORK(&up->work, remove_user_sysfs_dir);
 	schedule_work(&up->work);
 }
@@ -335,13 +338,14 @@ static inline void uids_mutex_unlock(void) { }
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	uid_hash_remove(up);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
 	key_put(up->session_keyring);
+	put_user_ns(up->user_ns);
 	kmem_cache_free(uid_cachep, up);
 }
 
@@ -357,7 +361,7 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current->nsproxy->user_ns;
+	struct user_namespace *ns = current_user()->user_ns;
 
 	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(ns, uid));
@@ -404,6 +408,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	if (sched_create_user(new) < 0)
 		goto out_free_user;
 
+	new->user_ns = get_user_ns(ns);
+
 	if (uids_user_create(new))
 		goto out_destoy_sched;
 
@@ -427,7 +433,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 			up = new;
 		}
 		spin_unlock_irq(&uidhash_lock);
-
 	}
 
 	uids_mutex_unlock();
@@ -436,6 +441,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
 	sched_destroy_user(new);
+	put_user_ns(new->user_ns);
 out_free_user:
 	kmem_cache_free(uid_cachep, new);
 out_unlock:
@@ -443,33 +449,6 @@ out_unlock:
 	return NULL;
 }
 
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
-	int i;
-	unsigned long flags;
-	struct hlist_head *head;
-	struct hlist_node *nd;
-
-	spin_lock_irqsave(&uidhash_lock, flags);
-	/*
-	 * collapse the chains so that the user_struct-s will
-	 * be still alive, but not in hashes. subsequent free_uid()
-	 * will free them.
-	 */
-	for (i = 0; i < UIDHASH_SZ; i++) {
-		head = ns->uidhash_table + i;
-		while (!hlist_empty(head)) {
-			nd = head->first;
-			hlist_del_init(nd);
-		}
-	}
-	spin_unlock_irqrestore(&uidhash_lock, flags);
-
-	free_uid(ns->root_user);
-}
-#endif
-
 static int __init uid_cache_init(void)
 {
 	int n;
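
A note on the refcounting this patch sets up: every user_struct now pins the user_namespace it was allocated in (get_user_ns() in alloc_uid(), matched by put_user_ns() in both free_user() variants and in the alloc_uid() error path), and root_user starts at a count of 2 because, per the new comment, one reference belongs to the init task's cred and one to init_user_ns->creator. The sketch below shows the kref pairing those calls imply; the real helpers live in include/linux/user_namespace.h, so treat the bodies as an illustration of the pattern rather than the exact tree contents.

	/* Illustrative sketch, not quoted from the tree: assumes
	 * user_namespace embeds a struct kref (as the init_user_ns
	 * initializer above shows) and that free_user_ns() is the
	 * kref release callback, as in kernels of this vintage. */
	static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
	{
		if (ns)
			kref_get(&ns->kref);	/* e.g. alloc_uid() pinning ns */
		return ns;
	}

	static inline void put_user_ns(struct user_namespace *ns)
	{
		if (ns)
			kref_put(&ns->kref, free_user_ns);	/* last put frees the ns */
	}

With that pairing in place, the deleted release_uids() walk over the hash chains becomes unnecessary: a namespace lives exactly as long as something still references it, and each user_struct drops its reference when it is freed.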