Diffstat (limited to 'kernel/user.c')
-rw-r--r-- | kernel/user.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index 4869563080e9..98b82507797a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -14,20 +14,19 @@
 #include <linux/bitops.h>
 #include <linux/key.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/user_namespace.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
 
-#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)
-#define UIDHASH_SZ (1 << UIDHASH_BITS)
 #define UIDHASH_MASK (UIDHASH_SZ - 1)
 #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid) (uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
 
 static struct kmem_cache *uid_cachep;
-static struct list_head uidhash_table[UIDHASH_SZ];
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
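The two UIDHASH size constants and the bucket array itself move out of this file: uid_cache_init() below still uses UIDHASH_SZ and indexes init_user_ns.uidhash_table, so the new <linux/user_namespace.h> include has to supply both. A minimal sketch of what that header would need to declare for this file to keep compiling — only uidhash_table and init_user_ns are visible in this diff; the kref and root_user members are assumptions:

/* Sketch of <linux/user_namespace.h>, inferred from the usage in this diff. */
#include <linux/kref.h>
#include <linux/list.h>

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)	/* moved from kernel/user.c */
#define UIDHASH_SZ (1 << UIDHASH_BITS)

struct user_namespace {
	struct kref kref;				/* assumed: namespace refcount */
	struct list_head uidhash_table[UIDHASH_SZ];	/* per-namespace hash buckets */
	struct user_struct *root_user;			/* assumed: this namespace's root */
};

extern struct user_namespace init_user_ns;		/* boot namespace, used below */

The point of the change shows in uidhashentry(): each namespace now carries its own bucket array, so two namespaces can cache user_structs for the same numeric uid without colliding in one global table.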
@@ -94,9 +93,10 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
+	struct user_namespace *ns = current->nsproxy->user_ns;
 
 	spin_lock_irqsave(&uidhash_lock, flags);
-	ret = uid_hash_find(uid, uidhashentry(uid));
+	ret = uid_hash_find(uid, uidhashentry(ns, uid));
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
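Note that find_user() keeps its one-argument signature: the namespace is resolved implicitly from the calling task via current->nsproxy->user_ns, so existing callers compile unchanged. A hypothetical caller, assuming the usual contract that a successful lookup returns with a reference held:

/* Hypothetical caller: look up uid 1000 in the current task's user
 * namespace, then drop the reference once done with it. */
struct user_struct *u = find_user(1000);
if (u) {
	/* ... inspect per-user accounting here ... */
	free_uid(u);	/* assumed to pair with the reference find_user() takes */
}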
@@ -120,9 +120,9 @@ void free_uid(struct user_struct *up)
 	}
 }
 
-struct user_struct * alloc_uid(uid_t uid)
+struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
-	struct list_head *hashent = uidhashentry(uid);
+	struct list_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up;
 
 	spin_lock_irq(&uidhash_lock);
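alloc_uid(), by contrast, gains an explicit namespace parameter, so every caller must now say which namespace's table the new user_struct hashes into. A hypothetical updated call site, mirroring the current->nsproxy->user_ns lookup that find_user() performs above (the -EAGAIN convention is an assumption about the surrounding code):

/* Hypothetical call site, e.g. inside a setuid()-style path. */
struct user_struct *new_user;

new_user = alloc_uid(current->nsproxy->user_ns, 1000);
if (!new_user)
	return -EAGAIN;	/* assumed error convention of the caller */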
@@ -211,11 +211,11 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(uidhash_table + n);
+		INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(0));
+	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
 	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
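uid_cache_init() can only hash root into init_user_ns if that instance already exists at boot, so a static definition must accompany this patch somewhere. A sketch of what it plausibly looks like (the initial refcount of 2 is an assumption):

/* Sketch: static definition of the boot user namespace indexed above. */
struct user_namespace init_user_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),	/* assumed initial count */
	},
	.root_user = &root_user,
};

As a sanity check on the bucket math: __uidhashfn(0) is ((0 >> UIDHASH_BITS) + 0) & UIDHASH_MASK = 0, so root always lands in bucket 0 of whichever namespace's table it is inserted into.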