path: root/kernel/user.c
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 11:22:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 11:22:16 -0400
commit	b5869ce7f68b233ceb81465a7644be0d9a5f3dbb (patch)
tree	e3611e7f038a4a4fa813532ae57a9a626fa1434d /kernel/user.c
parent	df3d80f5a5c74168be42788364d13cf6c83c7b9c (diff)
parent	9c63d9c021f375a2708ad79043d6f4dd1291a085 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched: (140 commits)
  sched: sync wakeups preempt too
  sched: affine sync wakeups
  sched: guest CPU accounting: maintain guest state in KVM
  sched: guest CPU accounting: maintain stats in account_system_time()
  sched: guest CPU accounting: add guest-CPU /proc/<pid>/stat fields
  sched: guest CPU accounting: add guest-CPU /proc/stat field
  sched: domain sysctl fixes: add terminator comment
  sched: domain sysctl fixes: do not crash on allocation failure
  sched: domain sysctl fixes: unregister the sysctl table before domains
  sched: domain sysctl fixes: use for_each_online_cpu()
  sched: domain sysctl fixes: use kcalloc()
  Make scheduler debug file operations const
  sched: enable wake-idle on CONFIG_SCHED_MC=y
  sched: reintroduce topology.h tunings
  sched: allow the immediate migration of cache-cold tasks
  sched: debug, improve migration statistics
  sched: debug: increase width of debug line
  sched: activate task_hot() only on fair-scheduled tasks
  sched: reintroduce cache-hot affinity
  sched: speed up context-switches a bit
  ...
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 249
1 file changed, 238 insertions(+), 11 deletions(-)
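
The kernel/user.c side of this merge attaches a scheduler task group to each user_struct and, under CONFIG_FAIR_USER_SCHED, publishes a per-UID tunable at /sys/kernel/uids/<uid>/cpu_share (see the diff below). As a rough sketch of how userspace could drive that knob (the helper itself is hypothetical, and it assumes a kernel built with CONFIG_FAIR_USER_SCHED=y):

/* cpu_share.c -- hypothetical helper for the sysfs knob added below.
 * Reads (and optionally writes) /sys/kernel/uids/<uid>/cpu_share.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64];
	unsigned long shares;
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <uid> [new_share]\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/kernel/uids/%s/cpu_share", argv[1]);

	if (argc == 3) {
		/* store side: the kernel parses this with sscanf("%lu") */
		f = fopen(path, "w");
		if (!f || fprintf(f, "%s\n", argv[2]) < 0) {
			perror(path);
			return 1;
		}
		fclose(f);
	}

	/* show side: the kernel formats the group's shares as "%lu\n" */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lu", &shares) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("uid %s: cpu_share = %lu\n", argv[1], shares);
	return 0;
}

Raising one user's cpu_share relative to another's shifts group weight between those users' tasks under the fair scheduler.
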
diff --git a/kernel/user.c b/kernel/user.c
index 9ca2848fc356..f0e561e6d085 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -50,12 +50,16 @@ struct user_struct root_user = {
 	.uid_keyring	= &root_user_keyring,
 	.session_keyring = &root_session_keyring,
 #endif
+#ifdef CONFIG_FAIR_USER_SCHED
+	.tg		= &init_task_group,
+#endif
 };

 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static inline void uid_hash_insert(struct user_struct *up,
+		struct hlist_head *hashent)
 {
 	hlist_add_head(&up->uidhash_node, hashent);
 }
@@ -65,13 +69,14 @@ static inline void uid_hash_remove(struct user_struct *up)
 	hlist_del_init(&up->uidhash_node);
 }

-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static inline struct user_struct *uid_hash_find(uid_t uid,
+		struct hlist_head *hashent)
 {
 	struct user_struct *user;
 	struct hlist_node *h;

 	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if(user->uid == uid) {
+		if (user->uid == uid) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -80,6 +85,203 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *ha
 	return NULL;
 }

+#ifdef CONFIG_FAIR_USER_SCHED
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
+static void sched_destroy_user(struct user_struct *up)
+{
+	sched_destroy_group(up->tg);
+}
+
+static int sched_create_user(struct user_struct *up)
+{
+	int rc = 0;
+
+	up->tg = sched_create_group();
+	if (IS_ERR(up->tg))
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+static void sched_switch_user(struct task_struct *p)
+{
+	sched_move_task(p);
+}
+
+static inline void uids_mutex_lock(void)
+{
+	mutex_lock(&uids_mutex);
+}
+
+static inline void uids_mutex_unlock(void)
+{
+	mutex_unlock(&uids_mutex);
+}
+
+/* return cpu shares held by the user */
+ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+{
+	struct user_struct *up = container_of(kset, struct user_struct, kset);
+
+	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+}
+
+/* modify cpu shares held by the user */
+ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+{
+	struct user_struct *up = container_of(kset, struct user_struct, kset);
+	unsigned long shares;
+	int rc;
+
+	sscanf(buffer, "%lu", &shares);
+
+	rc = sched_group_set_shares(up->tg, shares);
+
+	return (rc ? rc : size);
+}
+
+static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+{
+	sa->attr.name = name;
+	sa->attr.mode = mode;
+	sa->show = cpu_shares_show;
+	sa->store = cpu_shares_store;
+}
+
+/* Create "/sys/kernel/uids/<uid>" directory and
+ *	"/sys/kernel/uids/<uid>/cpu_share" file for this user.
+ */
+static int user_kobject_create(struct user_struct *up)
+{
+	struct kset *kset = &up->kset;
+	struct kobject *kobj = &kset->kobj;
+	int error;
+
+	memset(kset, 0, sizeof(struct kset));
+	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
+	kobject_set_name(kobj, "%d", up->uid);
+	kset_init(kset);
+	user_attr_init(&up->user_attr, "cpu_share", 0644);
+
+	error = kobject_add(kobj);
+	if (error)
+		goto done;
+
+	error = sysfs_create_file(kobj, &up->user_attr.attr);
+	if (error)
+		kobject_del(kobj);
+
+	kobject_uevent(kobj, KOBJ_ADD);
+
+done:
+	return error;
+}
+
+/* create these in sysfs filesystem:
+ *	"/sys/kernel/uids" directory
+ *	"/sys/kernel/uids/0" directory (for root user)
+ *	"/sys/kernel/uids/0/cpu_share" file (for root user)
+ */
+int __init uids_kobject_init(void)
+{
+	int error;
+
+	/* create under /sys/kernel dir */
+	uids_kobject.parent = &kernel_subsys.kobj;
+	uids_kobject.kset = &kernel_subsys;
+	kobject_set_name(&uids_kobject, "uids");
+	kobject_init(&uids_kobject);
+
+	error = kobject_add(&uids_kobject);
+	if (!error)
+		error = user_kobject_create(&root_user);
+
+	return error;
+}
+
+/* work function to remove sysfs directory for a user and free up
+ * corresponding structures.
+ */
+static void remove_user_sysfs_dir(struct work_struct *w)
+{
+	struct user_struct *up = container_of(w, struct user_struct, work);
+	struct kobject *kobj = &up->kset.kobj;
+	unsigned long flags;
+	int remove_user = 0;
+
+	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
+	local_irq_save(flags);
+
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+		uid_hash_remove(up);
+		remove_user = 1;
+		spin_unlock_irqrestore(&uidhash_lock, flags);
+	} else {
+		local_irq_restore(flags);
+	}
+
+	if (!remove_user)
+		goto done;
+
+	sysfs_remove_file(kobj, &up->user_attr.attr);
+	kobject_uevent(kobj, KOBJ_REMOVE);
+	kobject_del(kobj);
+
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+
+done:
+	uids_mutex_unlock();
+}
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+	/* restore back the count */
+	atomic_inc(&up->__count);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+
+	INIT_WORK(&up->work, remove_user_sysfs_dir);
+	schedule_work(&up->work);
+}
+
+#else	/* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+static inline int user_kobject_create(struct user_struct *up) { return 0; }
+static inline void uids_mutex_lock(void) { }
+static inline void uids_mutex_unlock(void) { }
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+	uid_hash_remove(up);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+}
+
+#endif	/* CONFIG_FAIR_USER_SCHED */
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().
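
The subtle piece in the hunk above is free_user(): free_uid() can be called with IRQs disabled and reaches free_user() with uidhash_lock held via atomic_dec_and_lock(), yet sysfs_remove_file()/kobject_del() may sleep. So the last put revives the refcount and defers the real teardown to a work item (remove_user_sysfs_dir), which re-drops the count in a context where sleeping is legal. A rough userspace analogue of that shape, with invented names (struct obj, put_obj), just to show the pattern:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* protected by lock */
	pthread_mutex_t lock;
};

/* Worker: re-drops the reference where blocking is allowed
 * (stands in for remove_user_sysfs_dir() above). */
static void *deferred_teardown(void *arg)
{
	struct obj *o = arg;
	int last;

	pthread_mutex_lock(&o->lock);
	last = (--o->refcount == 0);
	pthread_mutex_unlock(&o->lock);
	if (last) {
		/* the sleeping cleanup (sysfs/kobject removal) goes here */
		printf("worker: tearing down object\n");
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
	return NULL;
}

/* Fast-path put, callable from a context that must not sleep
 * (stands in for free_user() above). */
static void put_obj(struct obj *o, pthread_t *worker)
{
	int defer = 0;

	pthread_mutex_lock(&o->lock);
	if (--o->refcount == 0) {
		o->refcount++;		/* "restore back the count" */
		defer = 1;
	}
	pthread_mutex_unlock(&o->lock);
	if (defer)
		pthread_create(worker, NULL, deferred_teardown, o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	pthread_t worker;

	if (!o)
		return 1;
	o->refcount = 1;
	pthread_mutex_init(&o->lock, NULL);
	put_obj(o, &worker);		/* last ref: teardown is deferred */
	pthread_join(worker, NULL);
	return 0;
}
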
@@ -106,15 +308,10 @@ void free_uid(struct user_struct *up)
 		return;

 	local_irq_save(flags);
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
-		uid_hash_remove(up);
-		spin_unlock_irqrestore(&uidhash_lock, flags);
-		key_put(up->uid_keyring);
-		key_put(up->session_keyring);
-		kmem_cache_free(uid_cachep, up);
-	} else {
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+		free_user(up, flags);
+	else
 		local_irq_restore(flags);
-	}
 }

 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
@@ -122,6 +319,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up;

+	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	spin_unlock_irq(&uidhash_lock);
@@ -150,6 +352,22 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			return NULL;
 		}

+		if (sched_create_user(new) < 0) {
+			key_put(new->uid_keyring);
+			key_put(new->session_keyring);
+			kmem_cache_free(uid_cachep, new);
+			return NULL;
+		}
+
+		if (user_kobject_create(new)) {
+			sched_destroy_user(new);
+			key_put(new->uid_keyring);
+			key_put(new->session_keyring);
+			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
+			return NULL;
+		}
+
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -157,6 +375,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
+			/* This case is not possible when CONFIG_FAIR_USER_SCHED
+			 * is defined, since we serialize alloc_uid() using
+			 * uids_mutex. Hence no need to call
+			 * sched_destroy_user() or remove_user_sysfs_dir().
+			 */
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -167,6 +390,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_unlock_irq(&uidhash_lock);

 	}
+
+	uids_mutex_unlock();
+
 	return up;
 }

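
alloc_uid() now has the classic lookup / create-outside-the-lock / recheck-before-insert shape, with uids_mutex wrapped around the whole sequence because user_kobject_create() can sleep and must not race with another allocator or with the deferred teardown. (Note, visible in the hunks above, that the sched_create_user() failure path returns with uids_mutex still held, unlike the user_kobject_create() failure path.) A compressed, runnable sketch of the same shape, with a one-entry table standing in for the uid hash and all names invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* uids_mutex */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;	/* uidhash_lock */
static int *slot;			/* one-entry stand-in for the uid hash */

static int *get_or_create(int value)
{
	int *found, *new;

	pthread_mutex_lock(&outer);

	pthread_mutex_lock(&table_lock);
	found = slot;
	pthread_mutex_unlock(&table_lock);
	if (found)
		goto out;

	/* "may sleep" step: done with table_lock dropped */
	new = malloc(sizeof(*new));
	if (!new)
		goto out;		/* found is still NULL here */
	*new = value;

	pthread_mutex_lock(&table_lock);
	found = slot;			/* recheck: did someone else insert? */
	if (found)
		free(new);		/* unreachable while 'outer' is held */
	else
		found = slot = new;
	pthread_mutex_unlock(&table_lock);
out:
	pthread_mutex_unlock(&outer);
	return found;
}

int main(void)
{
	printf("first:  %d\n", *get_or_create(42));	/* creates 42 */
	printf("second: %d\n", *get_or_create(99));	/* finds the 42 */
	return 0;
}

Because 'outer' covers both lookups, the duplicate-found branch can never fire, which is exactly the point of the comment added in the up != NULL path above.
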
@@ -184,6 +410,7 @@ void switch_uid(struct user_struct *new_user)
 	atomic_dec(&old_user->processes);
 	switch_uid_keyring(new_user);
 	current->user = new_user;
+	sched_switch_user(current);

 	/*
 	 * We need to synchronize with __sigqueue_alloc()
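
The final hunk hooks the group scheduler into UID changes: switch_uid(), reached from the setuid() family via set_user(), now calls sched_switch_user(), i.e. sched_move_task(), so the task migrates into the new user's task group. A hypothetical demo of when that fires (assumes CONFIG_FAIR_USER_SCHED=y, run as root, and that uid 1000 exists):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* In-kernel this triggers switch_uid() -> sched_switch_user(current),
	 * moving the task into uid 1000's task group. */
	if (setuid(1000) != 0) {
		perror("setuid");
		return 1;
	}
	/* CPU burned here is weighted by /sys/kernel/uids/1000/cpu_share. */
	for (volatile unsigned long i = 0; i < 1000000000UL; i++)
		;
	return 0;
}
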