Diffstat (limited to 'kernel/user.c')
-rw-r--r--    kernel/user.c    189
1 file changed, 189 insertions(+), 0 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
new file mode 100644
index 000000000000..734575d55769
--- /dev/null
+++ b/kernel/user.c
@@ -0,0 +1,189 @@
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS        (CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ          (1 << UIDHASH_BITS)
#define UIDHASH_MASK        (UIDHASH_SZ - 1)
#define __uidhashfn(uid)    (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)   (uidhash_table + __uidhashfn((uid)))
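/*
 * Note: __uidhashfn() folds the bits above UIDHASH_BITS back onto the
 * low bits before masking, so UIDs that share their low bits still
 * spread across different hash buckets.
 */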

static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
static DEFINE_SPINLOCK(uidhash_lock);

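/*
 * The user_struct for UID 0.  It is statically allocated because the
 * first task needs a valid ->user before uid_cache_init() has run; it
 * starts out with one reference and one process (the boot task, which
 * runs as root).
 */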
struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .mq_bytes       = 0,
        .locked_shm     = 0,
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
        list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
        list_del(&up->uidhash_list);
}

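/* On a match, returns the entry with its reference count already raised. */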
static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
        struct list_head *up;

        list_for_each(up, hashent) {
                struct user_struct *user;

                user = list_entry(up, struct user_struct, uidhash_list);

                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;

        spin_lock(&uidhash_lock);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock(&uidhash_lock);
        return ret;
}

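/*
 * Drop a reference on a user_struct.  atomic_dec_and_lock() takes
 * uidhash_lock only when the count actually reaches zero, so the final
 * put unhashes and frees the structure without racing a concurrent
 * lookup.
 */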
void free_uid(struct user_struct *up)
{
        if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                key_put(up->uid_keyring);
                key_put(up->session_keyring);
                kmem_cache_free(uid_cachep, up);
                spin_unlock(&uidhash_lock);
        }
}

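/*
 * Look up the user_struct for a UID, allocating a new one if none
 * exists.  The allocation happens with uidhash_lock dropped, so the
 * hash must be re-checked under the lock afterwards: if another task
 * inserted the same UID in the meantime, its entry wins and ours is
 * discarded.
 */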
struct user_struct *alloc_uid(uid_t uid)
{
        struct list_head *hashent = uidhashentry(uid);
        struct user_struct *up;

        spin_lock(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);

                new->mq_bytes = 0;
                new->locked_shm = 0;

                if (alloc_uid_keyring(new) < 0) {
                        kmem_cache_free(uid_cachep, new);
                        return NULL;
                }

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock(&uidhash_lock);
        }
        return up;
}

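/*
 * Switch the current task's accounting over to new_user: the caller
 * hands us a reference to new_user (typically from alloc_uid()), which
 * the task keeps, and the reference to the old user is dropped.
 */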
void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit?  We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it.  -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        free_uid(old_user);
        suid_keys(current);
}

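/*
 * Boot-time setup: create the slab cache for user_structs, initialise
 * the hash buckets, and hash the statically allocated root_user so
 * lookups for UID 0 work from the start.
 */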
static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_LIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(0));
        spin_unlock(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);