diff options
| author | Eric W. Biederman <ebiederm@xmission.com> | 2011-11-17 02:20:58 -0500 |
|---|---|---|
| committer | Eric W. Biederman <ebiederm@xmission.com> | 2012-04-07 20:11:46 -0400 |
| commit | 7b44ab978b77a91b327058a0f4db7e6fcdb90b92 (patch) | |
| tree | 632c872f0b88d001f1bddce2c0aacd77bf062454 /kernel | |
| parent | 5673a94c14574d7c6495c320c6b0e480673d54bd (diff) | |
userns: Disassociate user_struct from the user_namespace.
Modify alloc_uid to take a kuid and make the user hash table global.
Stop holding a reference to the user namespace in struct user_struct.
This simplifies the code and makes the per user accounting not
care about which user namespace a uid happens to appear in.
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sys.c | 34 | ||||
| -rw-r--r-- | kernel/user.c | 28 | ||||
| -rw-r--r-- | kernel/user_namespace.c | 6 |
3 files changed, 37 insertions(+), 31 deletions(-)
diff --git a/kernel/sys.c b/kernel/sys.c index 71852417cfc..f0c43b4b665 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -175,6 +175,8 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) | |||
| 175 | const struct cred *cred = current_cred(); | 175 | const struct cred *cred = current_cred(); |
| 176 | int error = -EINVAL; | 176 | int error = -EINVAL; |
| 177 | struct pid *pgrp; | 177 | struct pid *pgrp; |
| 178 | kuid_t cred_uid; | ||
| 179 | kuid_t uid; | ||
| 178 | 180 | ||
| 179 | if (which > PRIO_USER || which < PRIO_PROCESS) | 181 | if (which > PRIO_USER || which < PRIO_PROCESS) |
| 180 | goto out; | 182 | goto out; |
| @@ -207,18 +209,22 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) | |||
| 207 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 209 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
| 208 | break; | 210 | break; |
| 209 | case PRIO_USER: | 211 | case PRIO_USER: |
| 212 | cred_uid = make_kuid(cred->user_ns, cred->uid); | ||
| 213 | uid = make_kuid(cred->user_ns, who); | ||
| 210 | user = cred->user; | 214 | user = cred->user; |
| 211 | if (!who) | 215 | if (!who) |
| 212 | who = cred->uid; | 216 | uid = cred_uid; |
| 213 | else if ((who != cred->uid) && | 217 | else if (!uid_eq(uid, cred_uid) && |
| 214 | !(user = find_user(who))) | 218 | !(user = find_user(uid))) |
| 215 | goto out_unlock; /* No processes for this user */ | 219 | goto out_unlock; /* No processes for this user */ |
| 216 | 220 | ||
| 217 | do_each_thread(g, p) { | 221 | do_each_thread(g, p) { |
| 218 | if (__task_cred(p)->uid == who) | 222 | const struct cred *tcred = __task_cred(p); |
| 223 | kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid); | ||
| 224 | if (uid_eq(tcred_uid, uid)) | ||
| 219 | error = set_one_prio(p, niceval, error); | 225 | error = set_one_prio(p, niceval, error); |
| 220 | } while_each_thread(g, p); | 226 | } while_each_thread(g, p); |
| 221 | if (who != cred->uid) | 227 | if (!uid_eq(uid, cred_uid)) |
| 222 | free_uid(user); /* For find_user() */ | 228 | free_uid(user); /* For find_user() */ |
| 223 | break; | 229 | break; |
| 224 | } | 230 | } |
| @@ -242,6 +248,8 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) | |||
| 242 | const struct cred *cred = current_cred(); | 248 | const struct cred *cred = current_cred(); |
| 243 | long niceval, retval = -ESRCH; | 249 | long niceval, retval = -ESRCH; |
| 244 | struct pid *pgrp; | 250 | struct pid *pgrp; |
| 251 | kuid_t cred_uid; | ||
| 252 | kuid_t uid; | ||
| 245 | 253 | ||
| 246 | if (which > PRIO_USER || which < PRIO_PROCESS) | 254 | if (which > PRIO_USER || which < PRIO_PROCESS) |
| 247 | return -EINVAL; | 255 | return -EINVAL; |
| @@ -272,21 +280,25 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) | |||
| 272 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 280 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
| 273 | break; | 281 | break; |
| 274 | case PRIO_USER: | 282 | case PRIO_USER: |
| 283 | cred_uid = make_kuid(cred->user_ns, cred->uid); | ||
| 284 | uid = make_kuid(cred->user_ns, who); | ||
| 275 | user = cred->user; | 285 | user = cred->user; |
| 276 | if (!who) | 286 | if (!who) |
| 277 | who = cred->uid; | 287 | uid = cred_uid; |
| 278 | else if ((who != cred->uid) && | 288 | else if (!uid_eq(uid, cred_uid) && |
| 279 | !(user = find_user(who))) | 289 | !(user = find_user(uid))) |
| 280 | goto out_unlock; /* No processes for this user */ | 290 | goto out_unlock; /* No processes for this user */ |
| 281 | 291 | ||
| 282 | do_each_thread(g, p) { | 292 | do_each_thread(g, p) { |
| 283 | if (__task_cred(p)->uid == who) { | 293 | const struct cred *tcred = __task_cred(p); |
| 294 | kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid); | ||
| 295 | if (uid_eq(tcred_uid, uid)) { | ||
| 284 | niceval = 20 - task_nice(p); | 296 | niceval = 20 - task_nice(p); |
| 285 | if (niceval > retval) | 297 | if (niceval > retval) |
| 286 | retval = niceval; | 298 | retval = niceval; |
| 287 | } | 299 | } |
| 288 | } while_each_thread(g, p); | 300 | } while_each_thread(g, p); |
| 289 | if (who != cred->uid) | 301 | if (!uid_eq(uid, cred_uid)) |
| 290 | free_uid(user); /* for find_user() */ | 302 | free_uid(user); /* for find_user() */ |
| 291 | break; | 303 | break; |
| 292 | } | 304 | } |
| @@ -629,7 +641,7 @@ static int set_user(struct cred *new) | |||
| 629 | { | 641 | { |
| 630 | struct user_struct *new_user; | 642 | struct user_struct *new_user; |
| 631 | 643 | ||
| 632 | new_user = alloc_uid(current_user_ns(), new->uid); | 644 | new_user = alloc_uid(make_kuid(new->user_ns, new->uid)); |
| 633 | if (!new_user) | 645 | if (!new_user) |
| 634 | return -EAGAIN; | 646 | return -EAGAIN; |
| 635 | 647 | ||
diff --git a/kernel/user.c b/kernel/user.c index d65fec0615a..025077e54a7 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -34,11 +34,14 @@ EXPORT_SYMBOL_GPL(init_user_ns); | |||
| 34 | * when changing user ID's (ie setuid() and friends). | 34 | * when changing user ID's (ie setuid() and friends). |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | #define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7) | ||
| 38 | #define UIDHASH_SZ (1 << UIDHASH_BITS) | ||
| 37 | #define UIDHASH_MASK (UIDHASH_SZ - 1) | 39 | #define UIDHASH_MASK (UIDHASH_SZ - 1) |
| 38 | #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) | 40 | #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) |
| 39 | #define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid))) | 41 | #define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid)))) |
| 40 | 42 | ||
| 41 | static struct kmem_cache *uid_cachep; | 43 | static struct kmem_cache *uid_cachep; |
| 44 | struct hlist_head uidhash_table[UIDHASH_SZ]; | ||
| 42 | 45 | ||
| 43 | /* | 46 | /* |
| 44 | * The uidhash_lock is mostly taken from process context, but it is | 47 | * The uidhash_lock is mostly taken from process context, but it is |
| @@ -58,7 +61,7 @@ struct user_struct root_user = { | |||
| 58 | .files = ATOMIC_INIT(0), | 61 | .files = ATOMIC_INIT(0), |
| 59 | .sigpending = ATOMIC_INIT(0), | 62 | .sigpending = ATOMIC_INIT(0), |
| 60 | .locked_shm = 0, | 63 | .locked_shm = 0, |
| 61 | ._user_ns = &init_user_ns, | 64 | .uid = GLOBAL_ROOT_UID, |
| 62 | }; | 65 | }; |
| 63 | 66 | ||
| 64 | /* | 67 | /* |
| @@ -72,16 +75,15 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent) | |||
| 72 | static void uid_hash_remove(struct user_struct *up) | 75 | static void uid_hash_remove(struct user_struct *up) |
| 73 | { | 76 | { |
| 74 | hlist_del_init(&up->uidhash_node); | 77 | hlist_del_init(&up->uidhash_node); |
| 75 | put_user_ns(up->_user_ns); /* It is safe to free the uid hash table now */ | ||
| 76 | } | 78 | } |
| 77 | 79 | ||
| 78 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) | 80 | static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) |
| 79 | { | 81 | { |
| 80 | struct user_struct *user; | 82 | struct user_struct *user; |
| 81 | struct hlist_node *h; | 83 | struct hlist_node *h; |
| 82 | 84 | ||
| 83 | hlist_for_each_entry(user, h, hashent, uidhash_node) { | 85 | hlist_for_each_entry(user, h, hashent, uidhash_node) { |
| 84 | if (user->uid == uid) { | 86 | if (uid_eq(user->uid, uid)) { |
| 85 | atomic_inc(&user->__count); | 87 | atomic_inc(&user->__count); |
| 86 | return user; | 88 | return user; |
| 87 | } | 89 | } |
| @@ -110,14 +112,13 @@ static void free_user(struct user_struct *up, unsigned long flags) | |||
| 110 | * | 112 | * |
| 111 | * If the user_struct could not be found, return NULL. | 113 | * If the user_struct could not be found, return NULL. |
| 112 | */ | 114 | */ |
| 113 | struct user_struct *find_user(uid_t uid) | 115 | struct user_struct *find_user(kuid_t uid) |
| 114 | { | 116 | { |
| 115 | struct user_struct *ret; | 117 | struct user_struct *ret; |
| 116 | unsigned long flags; | 118 | unsigned long flags; |
| 117 | struct user_namespace *ns = current_user_ns(); | ||
| 118 | 119 | ||
| 119 | spin_lock_irqsave(&uidhash_lock, flags); | 120 | spin_lock_irqsave(&uidhash_lock, flags); |
| 120 | ret = uid_hash_find(uid, uidhashentry(ns, uid)); | 121 | ret = uid_hash_find(uid, uidhashentry(uid)); |
| 121 | spin_unlock_irqrestore(&uidhash_lock, flags); | 122 | spin_unlock_irqrestore(&uidhash_lock, flags); |
| 122 | return ret; | 123 | return ret; |
| 123 | } | 124 | } |
| @@ -136,9 +137,9 @@ void free_uid(struct user_struct *up) | |||
| 136 | local_irq_restore(flags); | 137 | local_irq_restore(flags); |
| 137 | } | 138 | } |
| 138 | 139 | ||
| 139 | struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | 140 | struct user_struct *alloc_uid(kuid_t uid) |
| 140 | { | 141 | { |
| 141 | struct hlist_head *hashent = uidhashentry(ns, uid); | 142 | struct hlist_head *hashent = uidhashentry(uid); |
| 142 | struct user_struct *up, *new; | 143 | struct user_struct *up, *new; |
| 143 | 144 | ||
| 144 | spin_lock_irq(&uidhash_lock); | 145 | spin_lock_irq(&uidhash_lock); |
| @@ -153,8 +154,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
| 153 | new->uid = uid; | 154 | new->uid = uid; |
| 154 | atomic_set(&new->__count, 1); | 155 | atomic_set(&new->__count, 1); |
| 155 | 156 | ||
| 156 | new->_user_ns = get_user_ns(ns); | ||
| 157 | |||
| 158 | /* | 157 | /* |
| 159 | * Before adding this, check whether we raced | 158 | * Before adding this, check whether we raced |
| 160 | * on adding the same user already.. | 159 | * on adding the same user already.. |
| @@ -162,7 +161,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
| 162 | spin_lock_irq(&uidhash_lock); | 161 | spin_lock_irq(&uidhash_lock); |
| 163 | up = uid_hash_find(uid, hashent); | 162 | up = uid_hash_find(uid, hashent); |
| 164 | if (up) { | 163 | if (up) { |
| 165 | put_user_ns(ns); | ||
| 166 | key_put(new->uid_keyring); | 164 | key_put(new->uid_keyring); |
| 167 | key_put(new->session_keyring); | 165 | key_put(new->session_keyring); |
| 168 | kmem_cache_free(uid_cachep, new); | 166 | kmem_cache_free(uid_cachep, new); |
| @@ -187,11 +185,11 @@ static int __init uid_cache_init(void) | |||
| 187 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 185 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
| 188 | 186 | ||
| 189 | for(n = 0; n < UIDHASH_SZ; ++n) | 187 | for(n = 0; n < UIDHASH_SZ; ++n) |
| 190 | INIT_HLIST_HEAD(init_user_ns.uidhash_table + n); | 188 | INIT_HLIST_HEAD(uidhash_table + n); |
| 191 | 189 | ||
| 192 | /* Insert the root user immediately (init already runs as root) */ | 190 | /* Insert the root user immediately (init already runs as root) */ |
| 193 | spin_lock_irq(&uidhash_lock); | 191 | spin_lock_irq(&uidhash_lock); |
| 194 | uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0)); | 192 | uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID)); |
| 195 | spin_unlock_irq(&uidhash_lock); | 193 | spin_unlock_irq(&uidhash_lock); |
| 196 | 194 | ||
| 197 | return 0; | 195 | return 0; |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index e216e1e8ce8..898e973bd1e 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -27,7 +27,6 @@ int create_user_ns(struct cred *new) | |||
| 27 | { | 27 | { |
| 28 | struct user_namespace *ns, *parent_ns = new->user_ns; | 28 | struct user_namespace *ns, *parent_ns = new->user_ns; |
| 29 | struct user_struct *root_user; | 29 | struct user_struct *root_user; |
| 30 | int n; | ||
| 31 | 30 | ||
| 32 | ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL); | 31 | ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL); |
| 33 | if (!ns) | 32 | if (!ns) |
| @@ -35,11 +34,8 @@ int create_user_ns(struct cred *new) | |||
| 35 | 34 | ||
| 36 | kref_init(&ns->kref); | 35 | kref_init(&ns->kref); |
| 37 | 36 | ||
| 38 | for (n = 0; n < UIDHASH_SZ; ++n) | ||
| 39 | INIT_HLIST_HEAD(ns->uidhash_table + n); | ||
| 40 | |||
| 41 | /* Alloc new root user. */ | 37 | /* Alloc new root user. */ |
| 42 | root_user = alloc_uid(ns, 0); | 38 | root_user = alloc_uid(make_kuid(ns, 0)); |
| 43 | if (!root_user) { | 39 | if (!root_user) { |
| 44 | kmem_cache_free(user_ns_cachep, ns); | 40 | kmem_cache_free(user_ns_cachep, ns); |
| 45 | return -ENOMEM; | 41 | return -ENOMEM; |
