author		Eric W. Biederman <ebiederm@xmission.com>	2011-11-17 02:20:58 -0500
committer	Eric W. Biederman <ebiederm@xmission.com>	2012-04-07 20:11:46 -0400
commit		7b44ab978b77a91b327058a0f4db7e6fcdb90b92 (patch)
tree		632c872f0b88d001f1bddce2c0aacd77bf062454 /kernel/user.c
parent		5673a94c14574d7c6495c320c6b0e480673d54bd (diff)
userns: Disassociate user_struct from the user_namespace.
Modify alloc_uid to take a kuid and make the user hash table global.
Stop holding a reference to the user namespace in struct user_struct.
This simplifies the code and makes the per-user accounting
independent of which user namespace a uid happens to appear in.
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
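
[Editor's note] The change leans on the type-safe kuid_t wrapper introduced earlier in this series (include/linux/uidgid.h): a kernel-wide uid is a one-member struct rather than a bare uid_t, so namespace-relative and kernel-wide values cannot be mixed silently, and comparisons go through uid_eq(). Below is a minimal userspace sketch of that pattern and of the new global-hash keying; the helper definitions are simplified (the real header also carries the namespace mapping), and uidhashbucket() is an illustrative name, not a kernel symbol.

/* Userspace sketch of the kuid_t pattern this series introduces.
 * The struct wrapper makes kernel-internal uids a distinct type, so
 * they cannot be confused with namespace-relative uid_t values.
 * Simplified from include/linux/uidgid.h; hashing as in kernel/user.c. */
#include <stdio.h>
#include <sys/types.h>

typedef struct { uid_t val; } kuid_t;	/* distinct type, not an alias of uid_t */

#define KUIDT_INIT(value) (kuid_t){ value }
#define GLOBAL_ROOT_UID   KUIDT_INIT(0)

static inline uid_t __kuid_val(kuid_t uid) { return uid.val; }
static inline int uid_eq(kuid_t l, kuid_t r) { return __kuid_val(l) == __kuid_val(r); }

/* Hash keyed on the kernel-wide uid, as in the patched kernel/user.c */
#define UIDHASH_BITS 7
#define UIDHASH_SZ   (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid)   (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashbucket(uid) (__uidhashfn(__kuid_val(uid)))

int main(void)
{
	kuid_t root = GLOBAL_ROOT_UID;
	kuid_t user = KUIDT_INIT(1000);

	printf("root bucket: %u\n", uidhashbucket(root));	/* 0 */
	printf("uid 1000 bucket: %u\n", uidhashbucket(user));	/* ((1000 >> 7) + 1000) & 127 */
	printf("equal? %d\n", uid_eq(root, user));		/* 0 */
	return 0;
}

Note how uidhashentry() in the diff below feeds __kuid_val(uid) into the same hash function that previously took a bare uid_t.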
Diffstat (limited to 'kernel/user.c')
 kernel/user.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index d65fec0615a0..025077e54a7c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -34,11 +34,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
  * when changing user ID's (ie setuid() and friends).
  */
 
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
 
 static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
@@ -58,7 +61,7 @@ struct user_struct root_user = {
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
-	._user_ns	= &init_user_ns,
+	.uid		= GLOBAL_ROOT_UID,
 };
 
 /*
@@ -72,16 +75,15 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 static void uid_hash_remove(struct user_struct *up)
 {
 	hlist_del_init(&up->uidhash_node);
-	put_user_ns(up->_user_ns); /* It is safe to free the uid hash table now */
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
 	struct hlist_node *h;
 
 	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
+		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -110,14 +112,13 @@ static void free_user(struct user_struct *up, unsigned long flags)
  *
  * If the user_struct could not be found, return NULL.
  */
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
-	ret = uid_hash_find(uid, uidhashentry(ns, uid));
+	ret = uid_hash_find(uid, uidhashentry(uid));
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
@@ -136,9 +137,9 @@ void free_uid(struct user_struct *up)
 	local_irq_restore(flags);
 }
 
-struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
 {
-	struct hlist_head *hashent = uidhashentry(ns, uid);
+	struct hlist_head *hashent = uidhashentry(uid);
 	struct user_struct *up, *new;
 
 	spin_lock_irq(&uidhash_lock);
@@ -153,8 +154,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 
-		new->_user_ns = get_user_ns(ns);
-
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -162,7 +161,6 @@
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
-			put_user_ns(ns);
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -187,11 +185,11 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
+		INIT_HLIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
 	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
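
[Editor's note] On the caller side, the conversion point moves to the user/kernel boundary: code that used to pass (ns, uid) pairs down into the accounting layer now maps the namespace-relative uid to a kuid once and passes only that around. A hedged sketch of the pattern follows, assuming make_kuid() and uid_valid() from the same series; example_set_user() is an illustrative name, not code from this commit.

/* Sketch of the caller-side pattern after this change; not taken verbatim
 * from the kernel. Assumes make_kuid()/uid_valid() from include/linux/uidgid.h. */
static int example_set_user(struct user_namespace *ns, uid_t uid)
{
	struct user_struct *new_user;

	/* Translate the namespace-relative uid to a kernel-wide kuid once,
	 * at the boundary; everything below is namespace-agnostic. */
	kuid_t kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;		/* uid has no mapping in this namespace */

	new_user = alloc_uid(kuid);	/* global hash table, no ns argument */
	if (!new_user)
		return -EAGAIN;

	free_uid(new_user);		/* drop the reference in this sketch */
	return 0;
}

Keying the single global hash on kuid_t is what lets per-user accounting (process counts, locked shm, keys) stay correct even when the same user appears in several namespaces: all of those appearances map to one kernel-wide uid and therefore one user_struct.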