Diffstat (limited to 'kernel/user.c'):
 kernel/user.c | 51 +++++++++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index 71dd2363ab0f..b815fefbe76f 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -22,10 +22,27 @@
  * and 1 for... ?
  */
 struct user_namespace init_user_ns = {
+	.uid_map = {
+		.nr_extents = 1,
+		.extent[0] = {
+			.first = 0,
+			.lower_first = 0,
+			.count = 4294967295U,
+		},
+	},
+	.gid_map = {
+		.nr_extents = 1,
+		.extent[0] = {
+			.first = 0,
+			.lower_first = 0,
+			.count = 4294967295U,
+		},
+	},
 	.kref = {
 		.refcount	= ATOMIC_INIT(3),
 	},
-	.creator = &root_user,
+	.owner = GLOBAL_ROOT_UID,
+	.group = GLOBAL_ROOT_GID,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
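The two single-extent maps added above make the initial namespace an identity map: every uid/gid maps to itself, with count = 4294967295U covering the whole 32-bit range except the invalid id. A minimal userspace sketch of how one extent resolves an id (a model of the arithmetic only, not the kernel's lookup code):

/* Userspace sketch, assuming the extent semantics implied above: an id in
 * [first, first + count) maps to lower_first + (id - first). With
 * first = lower_first = 0 and count = 4294967295U this is the identity
 * map installed by init_user_ns. */
#include <stdint.h>
#include <stdio.h>

struct extent { uint32_t first, lower_first, count; };

static int map_id(const struct extent *e, uint32_t id, uint32_t *mapped)
{
	if (id < e->first || id - e->first >= e->count)
		return -1;		/* id not covered by this extent */
	*mapped = e->lower_first + (id - e->first);
	return 0;
}

int main(void)
{
	struct extent ident = { .first = 0, .lower_first = 0, .count = 4294967295U };
	uint32_t out;

	if (map_id(&ident, 1000, &out) == 0)
		printf("uid 1000 -> %u\n", out);	/* prints 1000 */
	return 0;
}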
@@ -34,11 +51,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
  * when changing user ID's (ie setuid() and friends).
  */
 
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
 
 static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
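With the per-namespace table gone, every user_struct hashes into the single global uidhash_table. A small userspace model of the bucket computation that uidhashentry() performs, taking UIDHASH_BITS as 7 (the non-CONFIG_BASE_SMALL value):

/* Userspace model of __uidhashfn(): fold the high bits into the low bits
 * and mask down to the table size. Not kernel code; UIDHASH_BITS = 7 is
 * assumed here (CONFIG_BASE_SMALL builds use 3 instead). */
#include <stdint.h>
#include <stdio.h>

#define UIDHASH_BITS	7
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)

static unsigned int uidhashfn(uint32_t uid)
{
	return ((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK;
}

int main(void)
{
	printf("uid 0    -> bucket %u of %d\n", uidhashfn(0), UIDHASH_SZ);	/* bucket 0 */
	printf("uid 1000 -> bucket %u of %d\n", uidhashfn(1000), UIDHASH_SZ);	/* bucket 111 */
	return 0;
}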
@@ -51,14 +71,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
-/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->user_ns */
+/* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(2),
+	.__count	= ATOMIC_INIT(1),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
-	.user_ns	= &init_user_ns,
+	.uid		= GLOBAL_ROOT_UID,
 };
 
 /*
@@ -72,16 +92,15 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 static void uid_hash_remove(struct user_struct *up)
 {
 	hlist_del_init(&up->uidhash_node);
-	put_user_ns(up->user_ns);
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
 	struct hlist_node *h;
 
 	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
+		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
 		}
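uid_hash_find() now compares kuid_t values with uid_eq() rather than ==. A userspace sketch of why that helper is needed: kuid_t is a structure wrapper around the raw value, so a direct comparison no longer compiles and every comparison has to go through a typed accessor (modelled here; the real definitions are in the kernel's uidgid header, not this file):

/* Userspace model of the kuid_t wrapper, assuming the shape of the kernel
 * definitions: wrapping the value in a struct turns "user->uid == uid"
 * into a compile error, forcing explicit helpers like uid_eq(). */
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t val; } kuid_t;

#define KUIDT_INIT(value)	(kuid_t){ value }

static inline uint32_t __kuid_val(kuid_t uid)
{
	return uid.val;
}

static inline bool uid_eq(kuid_t left, kuid_t right)
{
	return __kuid_val(left) == __kuid_val(right);
}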
@@ -110,14 +129,13 @@ static void free_user(struct user_struct *up, unsigned long flags)
  *
  * If the user_struct could not be found, return NULL.
  */
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
-	ret = uid_hash_find(uid, uidhashentry(ns, uid));
+	ret = uid_hash_find(uid, uidhashentry(uid));
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
@@ -136,9 +154,9 @@ void free_uid(struct user_struct *up)
 	local_irq_restore(flags);
 }
 
-struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
 {
-	struct hlist_head *hashent = uidhashentry(ns, uid);
+	struct hlist_head *hashent = uidhashentry(uid);
 	struct user_struct *up, *new;
 
 	spin_lock_irq(&uidhash_lock);
@@ -153,8 +171,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 
-		new->user_ns = get_user_ns(ns);
-
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -162,7 +178,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
-			put_user_ns(ns);
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
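The dropped put_user_ns() was the cleanup for the race path in alloc_uid(): look up under the lock, drop the lock to allocate, then re-check under the lock and discard the new allocation if another task inserted the same uid in the meantime. A generic userspace model of that pattern (plain pthreads, not the kernel code):

/* Userspace model of the lookup / allocate / re-check pattern used by
 * alloc_uid(): allocation happens with the lock dropped, so the lookup is
 * repeated under the lock before inserting, and the loser of the race
 * frees its own copy. */
#include <pthread.h>
#include <stdlib.h>

struct node { unsigned int key; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static struct node *lookup(unsigned int key)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

static struct node *get_or_create(unsigned int key)
{
	struct node *n, *new;

	pthread_mutex_lock(&lock);
	n = lookup(key);
	pthread_mutex_unlock(&lock);
	if (n)
		return n;

	new = calloc(1, sizeof(*new));		/* allocate with the lock dropped */
	if (!new)
		return NULL;
	new->key = key;

	pthread_mutex_lock(&lock);
	n = lookup(key);			/* re-check: did another thread win? */
	if (n) {
		free(new);			/* yes: discard our copy */
	} else {
		new->next = head;
		head = new;
		n = new;
	}
	pthread_mutex_unlock(&lock);
	return n;
}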
@@ -187,11 +202,11 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_HLIST_HEAD(uidhash_table + n);
+		INIT_HLIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
 	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
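For the boot-time insertion, GLOBAL_ROOT_UID is the kuid_t constant for uid 0, so uidhashentry(GLOBAL_ROOT_UID) lands in the same bucket the old uidhashentry(&init_user_ns, 0) call did. A self-contained userspace check of that equivalence, reusing the models sketched earlier (GLOBAL_ROOT_UID modelled as the kuid_t wrapping 0):

/* Userspace check only: uid 0 hashes to bucket 0 of the global table,
 * whether it arrives as a raw 0 or as the modelled GLOBAL_ROOT_UID. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } kuid_t;
#define KUIDT_INIT(value)	(kuid_t){ value }
#define GLOBAL_ROOT_UID		KUIDT_INIT(0)

#define UIDHASH_BITS	7
#define UIDHASH_MASK	((1 << UIDHASH_BITS) - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)

int main(void)
{
	kuid_t root = GLOBAL_ROOT_UID;

	printf("root bucket = %u\n", __uidhashfn(root.val));	/* prints 0 */
	return 0;
}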