author     Dhaval Giani <dhaval@linux.vnet.ibm.com>   2007-10-15 11:00:14 -0400
committer  Ingo Molnar <mingo@elte.hu>                2007-10-15 11:00:14 -0400
commit     5cb350baf580017da38199625b7365b1763d7180
tree       3830339798b1c6f19f1580700ea6ba240fb56ef2   /kernel/user.c
parent     8ca0e14ffb12c257de591571a9e96102acdb1c64
sched: group scheduling, sysfs tunables
Add tunables in sysfs to modify a user's cpu share.
A directory is created in sysfs for each new user in the system.
/sys/kernel/uids/<uid>/cpu_share
Reading this file returns the cpu shares granted for the user.
Writing into this file modifies the cpu share for the user. Only an
administrator is allowed to modify a user's cpu share.
Ex:
# cd /sys/kernel/uids/
# cat 512/cpu_share
1024
# echo 2048 > 512/cpu_share
# cat 512/cpu_share
2048
#
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
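For illustration only (not part of this patch), a minimal C sketch of how a privileged userspace program might drive the new cpu_share interface. The uid (512) and share value (2048) are just example inputs taken from the transcript above; error handling is kept minimal.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int uid = 512;		/* example uid only */
	const unsigned long shares = 2048;	/* example share value only */
	unsigned long readback;
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/uids/%u/cpu_share", uid);

	/* Writing the file requires administrator privileges. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%lu\n", shares);
	fclose(f);

	/* Read back the share now granted to the user. */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lu", &readback) != 1) {
		perror(path);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("uid %u: cpu_share = %lu\n", uid, readback);
	return EXIT_SUCCESS;
}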
Diffstat (limited to 'kernel/user.c')
 -rw-r--r--  kernel/user.c | 240
 1 file changed, 210 insertions(+), 30 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index 0c9a7870d08f..74cadea8466f 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -55,7 +55,41 @@ struct user_struct root_user = {
 #endif
 };
 
+/*
+ * These routines must be called with the uidhash spinlock held!
+ */
+static inline void uid_hash_insert(struct user_struct *up,
+		struct hlist_head *hashent)
+{
+	hlist_add_head(&up->uidhash_node, hashent);
+}
+
+static inline void uid_hash_remove(struct user_struct *up)
+{
+	hlist_del_init(&up->uidhash_node);
+}
+
+static inline struct user_struct *uid_hash_find(uid_t uid,
+		struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
+
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
+			atomic_inc(&user->__count);
+			return user;
+		}
+	}
+
+	return NULL;
+}
+
 #ifdef CONFIG_FAIR_USER_SCHED
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
 static void sched_destroy_user(struct user_struct *up)
 {
 	sched_destroy_group(up->tg);
@@ -77,42 +111,173 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }
 
-#else /* CONFIG_FAIR_USER_SCHED */
+static inline void uids_mutex_lock(void)
+{
+	mutex_lock(&uids_mutex);
+}
 
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
+static inline void uids_mutex_unlock(void)
+{
+	mutex_unlock(&uids_mutex);
+}
 
-#endif /* CONFIG_FAIR_USER_SCHED */
+/* return cpu shares held by the user */
+ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+{
+	struct user_struct *up = container_of(kset, struct user_struct, kset);
 
-/*
- * These routines must be called with the uidhash spinlock held!
+	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+}
+
+/* modify cpu shares held by the user */
+ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+{
+	struct user_struct *up = container_of(kset, struct user_struct, kset);
+	unsigned long shares;
+	int rc;
+
+	sscanf(buffer, "%lu", &shares);
+
+	rc = sched_group_set_shares(up->tg, shares);
+
+	return (rc ? rc : size);
+}
+
+static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+{
+	sa->attr.name = name;
+	sa->attr.mode = mode;
+	sa->show = cpu_shares_show;
+	sa->store = cpu_shares_store;
+}
+
+/* Create "/sys/kernel/uids/<uid>" directory and
+ *	"/sys/kernel/uids/<uid>/cpu_share" file for this user.
  */
-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static int user_kobject_create(struct user_struct *up)
 {
-	hlist_add_head(&up->uidhash_node, hashent);
+	struct kset *kset = &up->kset;
+	struct kobject *kobj = &kset->kobj;
+	int error;
+
+	memset(kset, 0, sizeof(struct kset));
+	kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
+	kobject_set_name(kobj, "%d", up->uid);
+	kset_init(kset);
+	user_attr_init(&up->user_attr, "cpu_share", 0644);
+
+	error = kobject_add(kobj);
+	if (error)
+		goto done;
+
+	error = sysfs_create_file(kobj, &up->user_attr.attr);
+	if (error)
+		kobject_del(kobj);
+
+done:
+	return error;
 }
 
-static inline void uid_hash_remove(struct user_struct *up)
+/* create these in sysfs filesystem:
+ *	"/sys/kernel/uids" directory
+ *	"/sys/kernel/uids/0" directory (for root user)
+ *	"/sys/kernel/uids/0/cpu_share" file (for root user)
+ */
+int __init uids_kobject_init(void)
 {
-	hlist_del_init(&up->uidhash_node);
+	int error;
+
+	/* create under /sys/kernel dir */
+	uids_kobject.parent = &kernel_subsys.kobj;
+	kobject_set_name(&uids_kobject, "uids");
+	kobject_init(&uids_kobject);
+
+	error = kobject_add(&uids_kobject);
+	if (!error)
+		error = user_kobject_create(&root_user);
+
+	return error;
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+/* work function to remove sysfs directory for a user and free up
+ * corresponding structures.
+ */
+static void remove_user_sysfs_dir(struct work_struct *w)
 {
-	struct user_struct *user;
-	struct hlist_node *h;
+	struct user_struct *up = container_of(w, struct user_struct, work);
+	struct kobject *kobj = &up->kset.kobj;
+	unsigned long flags;
+	int remove_user = 0;
 
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if(user->uid == uid) {
-			atomic_inc(&user->__count);
-			return user;
-		}
+	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
+	local_irq_save(flags);
+
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+		uid_hash_remove(up);
+		remove_user = 1;
+		spin_unlock_irqrestore(&uidhash_lock, flags);
+	} else {
+		local_irq_restore(flags);
 	}
 
-	return NULL;
+	if (!remove_user)
+		goto done;
+
+	sysfs_remove_file(kobj, &up->user_attr.attr);
+	kobject_del(kobj);
+
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+
+done:
+	uids_mutex_unlock();
+}
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+	/* restore back the count */
+	atomic_inc(&up->__count);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+
+	INIT_WORK(&up->work, remove_user_sysfs_dir);
+	schedule_work(&up->work);
 }
 
+#else /* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+static inline int user_kobject_create(struct user_struct *up) { return 0; }
+static inline void uids_mutex_lock(void) { }
+static inline void uids_mutex_unlock(void) { }
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+	uid_hash_remove(up);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+}
+
+#endif /* CONFIG_FAIR_USER_SCHED */
+
 /*
  * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
  * caller must undo that ref with free_uid().
@@ -139,16 +304,10 @@ void free_uid(struct user_struct *up)
 		return;
 
 	local_irq_save(flags);
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
-		uid_hash_remove(up);
-		spin_unlock_irqrestore(&uidhash_lock, flags);
-		sched_destroy_user(up);
-		key_put(up->uid_keyring);
-		key_put(up->session_keyring);
-		kmem_cache_free(uid_cachep, up);
-	} else {
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+		free_user(up, flags);
+	else
 		local_irq_restore(flags);
-	}
 }
 
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
@@ -156,6 +315,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up;
 
+	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	spin_unlock_irq(&uidhash_lock);
@@ -191,6 +355,15 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			return NULL;
 		}
 
+		if (user_kobject_create(new)) {
+			sched_destroy_user(new);
+			key_put(new->uid_keyring);
+			key_put(new->session_keyring);
+			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
+			return NULL;
+		}
+
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -198,7 +371,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
-			sched_destroy_user(new);
+			/* This case is not possible when CONFIG_FAIR_USER_SCHED
+			 * is defined, since we serialize alloc_uid() using
+			 * uids_mutex. Hence no need to call
+			 * sched_destroy_user() or remove_user_sysfs_dir().
+			 */
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -209,6 +386,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_unlock_irq(&uidhash_lock);
 
 	}
+
+	uids_mutex_unlock();
+
 	return up;
 }
 