| author | Ingo Molnar <mingo@elte.hu> | 2008-12-29 03:42:58 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-12-29 04:37:07 -0500 |
| commit | 0ce74d9296c971b2355c26984ad0bc538e34dd6c (patch) | |
| tree | 566d03e2a4f6b42dab9628cd82c93cd61d587467 /kernel/user.c | |
| parent | 1cc4fff0b360aeffeedb7d6db5089d88dd861700 (diff) | |
| parent | 3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff) | |
Merge branch 'linus' into timers/hrtimers

Conflicts:
	sound/drivers/pcsp/pcsp.c

Semantic conflict:
	sound/core/hrtimer.c
Diffstat (limited to 'kernel/user.c')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/user.c | 98 |

1 file changed, 27 insertions(+), 71 deletions(-)
```diff
diff --git a/kernel/user.c b/kernel/user.c
index 39d6159fae43..477b6660f447 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,12 +16,13 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/user_namespace.h>
+#include "cred-internals.h"
 
 struct user_namespace init_user_ns = {
 	.kref = {
-		.refcount	= ATOMIC_INIT(2),
+		.refcount	= ATOMIC_INIT(1),
 	},
-	.root_user = &root_user,
+	.creator = &root_user,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= ATOMIC_INIT(2),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
+	.user_ns	= &init_user_ns,
 #ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
@@ -101,19 +104,15 @@ static int sched_create_user(struct user_struct *up)
 	if (IS_ERR(up->tg))
 		rc = -ENOMEM;
 
-	return rc;
-}
+	set_tg_uid(up);
 
-static void sched_switch_user(struct task_struct *p)
-{
-	sched_move_task(p);
+	return rc;
 }
 
 #else	/* CONFIG_USER_SCHED */
 
 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
 
 #endif	/* CONFIG_USER_SCHED */
 
```
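The bookkeeping in the hunks above inverts the ownership between the two singletons: instead of the namespace merely naming its root_user, it now holds a counted reference on the user that created it, which is why root_user.__count grows from 1 to 2 (one for the init task's cred, one for init_user_ns->creator, per the new comment) while init_user_ns's initial refcount drops from 2 to 1. A structural sketch of the resulting relationship, using illustrative stand-in types rather than the real kernel definitions:

```c
/* Stand-ins only: the real types use kref/atomic_t and carry many more fields. */
struct user_struct;

struct user_namespace {
	int refcount;                   /* init_user_ns: ATOMIC_INIT(1) after this patch */
	struct user_struct *creator;    /* counted reference; replaces .root_user */
};

struct user_struct {
	int count;                      /* root_user: 2 = init cred + ns->creator */
	struct user_namespace *user_ns; /* back-pointer this patch adds */
};
```

The remaining hunks thread that new user_ns back-pointer through the sysfs, lookup, and teardown paths: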
```diff
@@ -242,13 +241,21 @@ static struct kobj_type uids_ktype = {
 	.release = uids_release,
 };
 
-/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
 static int uids_user_create(struct user_struct *up)
 {
 	struct kobject *kobj = &up->kobj;
 	int error;
 
 	memset(kobj, 0, sizeof(struct kobject));
+	if (up->user_ns != &init_user_ns)
+		return 0;
 	kobj->kset = uids_kset;
 	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
 	if (error) {
@@ -284,6 +291,8 @@ static void remove_user_sysfs_dir(struct work_struct *w)
 	unsigned long flags;
 	int remove_user = 0;
 
+	if (up->user_ns != &init_user_ns)
+		return;
 	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
 	 * atomic.
 	 */
@@ -319,12 +328,13 @@ done:
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	/* restore back the count */
 	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 
+	put_user_ns(up->user_ns);
 	INIT_WORK(&up->work, remove_user_sysfs_dir);
 	schedule_work(&up->work);
 }
@@ -340,13 +350,14 @@ static inline void uids_mutex_unlock(void) { }
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
 	uid_hash_remove(up);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
 	key_put(up->session_keyring);
+	put_user_ns(up->user_ns);
 	kmem_cache_free(uid_cachep, up);
 }
 
@@ -362,7 +373,7 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current->nsproxy->user_ns;
+	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(ns, uid));
@@ -409,6 +420,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		if (sched_create_user(new) < 0)
 			goto out_free_user;
 
+		new->user_ns = get_user_ns(ns);
+
 		if (uids_user_create(new))
 			goto out_destoy_sched;
 
@@ -432,7 +445,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 			up = new;
 		}
 		spin_unlock_irq(&uidhash_lock);
-
 	}
 
 	uids_mutex_unlock();
@@ -441,6 +453,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
 	sched_destroy_user(new);
+	put_user_ns(new->user_ns);
 out_free_user:
 	kmem_cache_free(uid_cachep, new);
 out_unlock:
@@ -448,63 +461,6 @@ out_unlock:
 	return NULL;
 }
 
-void switch_uid(struct user_struct *new_user)
-{
-	struct user_struct *old_user;
-
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit? We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it. -DaveM
-	 */
-	old_user = current->user;
-	atomic_inc(&new_user->processes);
-	atomic_dec(&old_user->processes);
-	switch_uid_keyring(new_user);
-	current->user = new_user;
-	sched_switch_user(current);
-
-	/*
-	 * We need to synchronize with __sigqueue_alloc()
-	 * doing a get_uid(p->user).. If that saw the old
-	 * user value, we need to wait until it has exited
-	 * its critical region before we can free the old
-	 * structure.
-	 */
-	smp_mb();
-	spin_unlock_wait(&current->sighand->siglock);
-
-	free_uid(old_user);
-	suid_keys(current);
-}
-
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
-	int i;
-	unsigned long flags;
-	struct hlist_head *head;
-	struct hlist_node *nd;
-
-	spin_lock_irqsave(&uidhash_lock, flags);
-	/*
-	 * collapse the chains so that the user_struct-s will
-	 * be still alive, but not in hashes. subsequent free_uid()
-	 * will free them.
-	 */
-	for (i = 0; i < UIDHASH_SZ; i++) {
-		head = ns->uidhash_table + i;
-		while (!hlist_empty(head)) {
-			nd = head->first;
-			hlist_del_init(nd);
-		}
-	}
-	spin_unlock_irqrestore(&uidhash_lock, flags);
-
-	free_uid(ns->root_user);
-}
-#endif
-
 static int __init uid_cache_init(void)
 {
 	int n;
```
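On the dynamic side, the patch pairs every user_struct with a reference on the namespace it is hashed in: alloc_uid() takes one via get_user_ns() (dropping it again on the out_destoy_sched error path), and both free_user() variants release it via put_user_ns(). Below is a minimal userspace sketch of that get/put pairing; the types and helpers are simplified stand-ins, not the kernel's kref-based implementations:

```c
#include <stdatomic.h>
#include <stdlib.h>

struct user_namespace {
	atomic_int refcount;
};

struct user_struct {
	atomic_int count;
	struct user_namespace *user_ns;
};

static struct user_namespace *get_user_ns(struct user_namespace *ns)
{
	atomic_fetch_add(&ns->refcount, 1);
	return ns;
}

static void put_user_ns(struct user_namespace *ns)
{
	/* fetch_sub returns the old value; 1 means we dropped the last
	 * reference, mirroring kref_put()'s release callback. */
	if (atomic_fetch_sub(&ns->refcount, 1) == 1)
		free(ns);
}

/* Mirrors alloc_uid(): pin the namespace once the uid exists, so the
 * teardown path always has a reference to drop. */
static struct user_struct *alloc_uid_sketch(struct user_namespace *ns)
{
	struct user_struct *up = calloc(1, sizeof(*up));

	if (!up)
		return NULL;
	atomic_init(&up->count, 1);
	up->user_ns = get_user_ns(ns);
	return up;
}

/* Mirrors both free_user() variants: unpin the namespace exactly once,
 * before the user_struct itself goes away. */
static void free_user_sketch(struct user_struct *up)
{
	put_user_ns(up->user_ns);
	free(up);
}

int main(void)
{
	struct user_namespace *ns = malloc(sizeof(*ns));

	atomic_init(&ns->refcount, 1);	/* the creator's reference */
	struct user_struct *up = alloc_uid_sketch(ns);
	if (up)
		free_user_sketch(up);	/* refcount back to 1 */
	put_user_ns(ns);		/* last put frees the namespace */
	return 0;
}
```

With the reference counting centralized this way, switch_uid() and release_uids() lose their remaining callers and are deleted outright; judging by the new "cred-internals.h" include, that per-task bookkeeping presumably moved into the credentials code merged in alongside this branch.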
