Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c  96
1 file changed, 25 insertions(+), 71 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index cec2224bc9f5..477b6660f447 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,12 +16,13 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/user_namespace.h>
+#include "cred-internals.h"
 
 struct user_namespace init_user_ns = {
 	.kref = {
 		.refcount	= ATOMIC_INIT(2),
 	},
-	.root_user = &root_user,
+	.creator = &root_user,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
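Note: after this hunk the namespace's lifetime is managed purely through its kref, and the rest of the patch takes and drops references with get_user_ns()/put_user_ns(). For reference, those helpers are thin kref wrappers, roughly as follows (paraphrased from include/linux/user_namespace.h of this era; a sketch, not verbatim):

static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
	if (ns)
		kref_get(&ns->kref);	/* bump ns->kref.refcount */
	return ns;
}

static inline void put_user_ns(struct user_namespace *ns)
{
	if (ns)
		kref_put(&ns->kref, free_user_ns);	/* release on last put */
}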
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= ATOMIC_INIT(2),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
+	.user_ns	= &init_user_ns,
 #ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
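Note: the new one-line comment states a boot-time invariant that is easy to get wrong. Spelled out as a hypothetical self-check (illustrative only, not part of the patch):

static void __init check_root_user_refs(void)
{
	/* one reference from the init task's credentials ... */
	/* ... plus one from init_user_ns.creator, per the comment above */
	WARN_ON(atomic_read(&root_user.__count) != 2);
	WARN_ON(init_user_ns.creator != &root_user);
	WARN_ON(root_user.user_ns != &init_user_ns);
}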
@@ -106,16 +109,10 @@ static int sched_create_user(struct user_struct *up)
 	return rc;
 }
 
-static void sched_switch_user(struct task_struct *p)
-{
-	sched_move_task(p);
-}
-
 #else	/* CONFIG_USER_SCHED */
 
 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
 
 #endif	/* CONFIG_USER_SCHED */
 
@@ -244,13 +241,21 @@ static struct kobj_type uids_ktype = {
 	.release = uids_release,
 };
 
-/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
 static int uids_user_create(struct user_struct *up)
 {
 	struct kobject *kobj = &up->kobj;
 	int error;
 
 	memset(kobj, 0, sizeof(struct kobject));
+	if (up->user_ns != &init_user_ns)
+		return 0;
 	kobj->kset = uids_kset;
 	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
 	if (error) {
@@ -286,6 +291,8 @@ static void remove_user_sysfs_dir(struct work_struct *w)
 	unsigned long flags;
 	int remove_user = 0;
 
+	if (up->user_ns != &init_user_ns)
+		return;
 	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
 	 * atomic.
 	 */
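Note: this guard mirrors the one added to uids_user_create() above; the create and teardown paths must stay in sync, otherwise kobject_del() would run on a kobject that was never added. If a third caller ever needs the same test, it could be hoisted into a helper along these lines (hypothetical, not in the patch):

static inline bool uids_visible_in_sysfs(struct user_struct *up)
{
	/* sysfs entries exist only for users in the initial namespace */
	return up->user_ns == &init_user_ns;
}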
@@ -321,12 +328,13 @@ done:
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	/* restore back the count */
 	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 
+	put_user_ns(up->user_ns);
 	INIT_WORK(&up->work, remove_user_sysfs_dir);
 	schedule_work(&up->work);
 }
@@ -342,13 +350,14 @@ static inline void uids_mutex_unlock(void) { }
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	uid_hash_remove(up);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
 	key_put(up->session_keyring);
+	put_user_ns(up->user_ns);
 	kmem_cache_free(uid_cachep, up);
 }
 
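Note: both free_user() variants now drop the reference that alloc_uid() takes below, so a user_struct pins its namespace for its entire lifetime. A toy userspace model of the pairing (illustrative only; ordinary C, not kernel code):

#include <assert.h>
#include <stdlib.h>

struct ns { int refs; };

static struct ns *get_ns(struct ns *n) { n->refs++; return n; }
static void put_ns(struct ns *n) { if (--n->refs == 0) free(n); }

struct user { struct ns *ns; };

int main(void)
{
	struct ns *some_ns = malloc(sizeof(*some_ns));
	some_ns->refs = 1;				/* creator's reference */

	struct user u = { .ns = get_ns(some_ns) };	/* alloc_uid() */
	put_ns(u.ns);					/* free_user() */

	assert(some_ns->refs == 1);	/* balanced: only the creator's ref remains */
	put_ns(some_ns);		/* creator exits; namespace is freed */
	return 0;
}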
@@ -364,7 +373,7 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current->nsproxy->user_ns;
+	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(ns, uid));
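Note: current_user_ns() replaces the nsproxy-based lookup. With user_ns now living in struct user_struct (see the root_user hunk above), the namespace is reached through the task's credentials; the accessor resolves to roughly the following (paraphrased; the real macro in linux/cred.h may differ in detail):

#define current_user_ns()	(current->cred->user->user_ns)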
@@ -411,6 +420,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		if (sched_create_user(new) < 0)
 			goto out_free_user;
 
+		new->user_ns = get_user_ns(ns);
+
 		if (uids_user_create(new))
 			goto out_destoy_sched;
 
@@ -434,7 +445,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 			up = new;
 		}
 		spin_unlock_irq(&uidhash_lock);
-
 	}
 
 	uids_mutex_unlock();
@@ -443,6 +453,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
 	sched_destroy_user(new);
+	put_user_ns(new->user_ns);
 out_free_user:
 	kmem_cache_free(uid_cachep, new);
 out_unlock:
@@ -450,63 +461,6 @@ out_unlock:
 	return NULL;
 }
 
-void switch_uid(struct user_struct *new_user)
-{
-	struct user_struct *old_user;
-
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit?  We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it.  -DaveM
-	 */
-	old_user = current->user;
-	atomic_inc(&new_user->processes);
-	atomic_dec(&old_user->processes);
-	switch_uid_keyring(new_user);
-	current->user = new_user;
-	sched_switch_user(current);
-
-	/*
-	 * We need to synchronize with __sigqueue_alloc()
-	 * doing a get_uid(p->user).. If that saw the old
-	 * user value, we need to wait until it has exited
-	 * its critical region before we can free the old
-	 * structure.
-	 */
-	smp_mb();
-	spin_unlock_wait(&current->sighand->siglock);
-
-	free_uid(old_user);
-	suid_keys(current);
-}
-
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
-	int i;
-	unsigned long flags;
-	struct hlist_head *head;
-	struct hlist_node *nd;
-
-	spin_lock_irqsave(&uidhash_lock, flags);
-	/*
-	 * collapse the chains so that the user_struct-s will
-	 * be still alive, but not in hashes. subsequent free_uid()
-	 * will free them.
-	 */
-	for (i = 0; i < UIDHASH_SZ; i++) {
-		head = ns->uidhash_table + i;
-		while (!hlist_empty(head)) {
-			nd = head->first;
-			hlist_del_init(nd);
-		}
-	}
-	spin_unlock_irqrestore(&uidhash_lock, flags);
-
-	free_uid(ns->root_user);
-}
-#endif
-
 static int __init uid_cache_init(void)
 {
 	int n;
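Note: switch_uid() and release_uids() can go because the credentials rework covers their duties. The process accounting that switch_uid() did moves into the cred-switching path; from memory, commit_creds() in kernel/cred.c of this era does something like the following when the user changes (a sketch, not verbatim):

/* inside commit_creds(struct cred *new), sketched: */
if (new->user != old->user)
	atomic_inc(&new->user->processes);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user)
	atomic_dec(&old->user->processes);

release_uids()'s job, unhashing a namespace's user_structs at teardown, is no longer needed: each user_struct now holds its own reference on the namespace (get_user_ns() in alloc_uid()), so the namespace simply outlives its users and the final put_user_ns() in free_user() releases it.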