Diffstat (limited to 'kernel/user.c')

 -rw-r--r--   kernel/user.c   96
 1 file changed, 25 insertions(+), 71 deletions(-)

diff --git a/kernel/user.c b/kernel/user.c
index 39d6159fae43..6608a3d8ca61 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,12 +16,13 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/user_namespace.h>
+#include "cred-internals.h"
 
 struct user_namespace init_user_ns = {
         .kref = {
-                .refcount       = ATOMIC_INIT(2),
+                .refcount       = ATOMIC_INIT(1),
         },
-        .root_user = &root_user,
+        .creator = &root_user,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-        .__count        = ATOMIC_INIT(1),
+        .__count        = ATOMIC_INIT(2),
         .processes      = ATOMIC_INIT(1),
         .files          = ATOMIC_INIT(0),
         .sigpending     = ATOMIC_INIT(0),
         .locked_shm     = 0,
+        .user_ns        = &init_user_ns,
 #ifdef CONFIG_USER_SCHED
         .tg             = &init_task_group,
 #endif
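
The two refcount changes above are halves of one adjustment: root_user now starts at 2 because, besides the init task's cred, init_user_ns->creator holds a reference on it (the new comment spells this out), while init_user_ns drops to 1 since nothing shown in this file takes an extra boot-time reference on it. A hedged bookkeeping sketch of the reference graph implied by the two initializers (illustrative comment, not part of the patch):

        /*
         * Boot-time reference ledger implied by the initializers above
         * (illustrative; only the facts visible in the diff are certain):
         *
         *   init_user_ns.kref.refcount == 1   its own initial reference
         *   init_user_ns.creator       == &root_user  -> owns 1 root_user ref
         *
         *   root_user.__count          == 2   1 for the init task's cred +
         *                                     1 for init_user_ns->creator
         *   root_user.user_ns          == &init_user_ns
         */
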
@@ -104,16 +107,10 @@ static int sched_create_user(struct user_struct *up)
         return rc;
 }
 
-static void sched_switch_user(struct task_struct *p)
-{
-        sched_move_task(p);
-}
-
 #else  /* CONFIG_USER_SCHED */
 
 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
 
 #endif /* CONFIG_USER_SCHED */
 
@@ -242,13 +239,21 @@ static struct kobj_type uids_ktype = {
         .release = uids_release,
 };
 
-/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
 static int uids_user_create(struct user_struct *up)
 {
         struct kobject *kobj = &up->kobj;
         int error;
 
         memset(kobj, 0, sizeof(struct kobject));
+        if (up->user_ns != &init_user_ns)
+                return 0;
         kobj->kset = uids_kset;
         error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
         if (error) {
@@ -284,6 +289,8 @@ static void remove_user_sysfs_dir(struct work_struct *w)
         unsigned long flags;
         int remove_user = 0;
 
+        if (up->user_ns != &init_user_ns)
+                return;
         /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
          * atomic.
          */
@@ -319,12 +326,13 @@ done:
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
         /* restore back the count */
         atomic_inc(&up->__count);
         spin_unlock_irqrestore(&uidhash_lock, flags);
 
+        put_user_ns(up->user_ns);
         INIT_WORK(&up->work, remove_user_sysfs_dir);
         schedule_work(&up->work);
 }
@@ -340,13 +348,14 @@ static inline void uids_mutex_unlock(void) { }
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
         uid_hash_remove(up);
         spin_unlock_irqrestore(&uidhash_lock, flags);
         sched_destroy_user(up);
         key_put(up->uid_keyring);
         key_put(up->session_keyring);
+        put_user_ns(up->user_ns);
         kmem_cache_free(uid_cachep, up);
 }
 
@@ -362,7 +371,7 @@ struct user_struct *find_user(uid_t uid)
 {
         struct user_struct *ret;
         unsigned long flags;
-        struct user_namespace *ns = current->nsproxy->user_ns;
+        struct user_namespace *ns = current_user_ns();
 
         spin_lock_irqsave(&uidhash_lock, flags);
         ret = uid_hash_find(uid, uidhashentry(ns, uid));
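
Only the source of the namespace changes here; the reference semantics of find_user() are untouched: uid_hash_find() takes a reference on the found user_struct under uidhash_lock, and the caller must drop it. A minimal caller sketch, assuming that unchanged contract:

        struct user_struct *up;

        up = find_user(uid);            /* uid interpreted in current_user_ns() */
        if (up) {
                /* ... read up->processes, up->sigpending, etc. ... */
                free_uid(up);           /* drop the reference the lookup took */
        }
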
@@ -409,6 +418,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
         if (sched_create_user(new) < 0)
                 goto out_free_user;
 
+        new->user_ns = get_user_ns(ns);
+
         if (uids_user_create(new))
                 goto out_destoy_sched;
 
@@ -432,7 +443,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
                         up = new;
                 }
                 spin_unlock_irq(&uidhash_lock);
-
         }
 
         uids_mutex_unlock();
@@ -441,6 +451,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
         sched_destroy_user(new);
+        put_user_ns(new->user_ns);
 out_free_user:
         kmem_cache_free(uid_cachep, new);
 out_unlock:
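
Together with the put_user_ns() calls added to both free_user() variants above, this error-path put makes the namespace reference fully bracketed: taken exactly once in alloc_uid(), dropped either here on failure or when the last free_uid() fires. An illustrative lifetime sketch of the resulting invariant (not code from the patch):

        /* Illustrative only: alloc_uid() takes the namespace reference,
         * its error paths and free_user() drop it, so callers never
         * manage up->user_ns themselves. */
        static int example_lifetime(struct user_namespace *ns, uid_t uid)
        {
                struct user_struct *up = alloc_uid(ns, uid);

                if (!up)
                        return -EAGAIN; /* error paths already undid get_user_ns() */
                /* ... up->user_ns is pinned for as long as up lives ... */
                free_uid(up);           /* dropping the final reference reaches
                                         * put_user_ns() in free_user() */
                return 0;
        }
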
@@ -448,63 +459,6 @@ out_unlock:
         return NULL;
 }
 
-void switch_uid(struct user_struct *new_user)
-{
-        struct user_struct *old_user;
-
-        /* What if a process setreuid()'s and this brings the
-         * new uid over his NPROC rlimit?  We can check this now
-         * cheaply with the new uid cache, so if it matters
-         * we should be checking for it.  -DaveM
-         */
-        old_user = current->user;
-        atomic_inc(&new_user->processes);
-        atomic_dec(&old_user->processes);
-        switch_uid_keyring(new_user);
-        current->user = new_user;
-        sched_switch_user(current);
-
-        /*
-         * We need to synchronize with __sigqueue_alloc()
-         * doing a get_uid(p->user).. If that saw the old
-         * user value, we need to wait until it has exited
-         * its critical region before we can free the old
-         * structure.
-         */
-        smp_mb();
-        spin_unlock_wait(&current->sighand->siglock);
-
-        free_uid(old_user);
-        suid_keys(current);
-}
-
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
-        int i;
-        unsigned long flags;
-        struct hlist_head *head;
-        struct hlist_node *nd;
-
-        spin_lock_irqsave(&uidhash_lock, flags);
-        /*
-         * collapse the chains so that the user_struct-s will
-         * be still alive, but not in hashes. subsequent free_uid()
-         * will free them.
-         */
-        for (i = 0; i < UIDHASH_SZ; i++) {
-                head = ns->uidhash_table + i;
-                while (!hlist_empty(head)) {
-                        nd = head->first;
-                        hlist_del_init(nd);
-                }
-        }
-        spin_unlock_irqrestore(&uidhash_lock, flags);
-
-        free_uid(ns->root_user);
-}
-#endif
-
 static int __init uid_cache_init(void)
 {
         int n;
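
The deleted switch_uid() (and the sched_switch_user() helper removed earlier) had lost their callers to the credentials code, which is why the patch adds the "cred-internals.h" include at the top; and since each user_struct now pins its namespace via get_user_ns(), a namespace cannot be torn down while any of its uids remain hashed, leaving release_uids() with nothing to do. A hedged sketch of the pattern that replaces switch_uid(), assuming the contemporaneous prepare_creds()/commit_creds() interface from kernel/cred.c (illustrative, not from this patch):

        #include <linux/cred.h>

        /* Sketch: commit_creds() now performs the bookkeeping that
         * switch_uid() used to do, adjusting ->processes on both
         * user_structs and dropping the old user reference. */
        static int example_set_uid(uid_t uid)
        {
                struct cred *new = prepare_creds();     /* COW copy of current creds */

                if (!new)
                        return -ENOMEM;
                new->uid = new->euid = new->fsuid = uid;
                /* real setuid() paths also repoint new->user via
                 * alloc_uid(); elided in this sketch */
                return commit_creds(new);       /* install; old creds are put */
        }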