author     Paul Mackerras <paulus@samba.org>    2008-01-30 19:25:51 -0500
committer  Paul Mackerras <paulus@samba.org>    2008-01-30 19:25:51 -0500
commit     bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree       5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /kernel/user.c
parent     4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent     5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'kernel/user.c')
-rw-r--r--   kernel/user.c | 152
1 file changed, 71 insertions(+), 81 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index 8320a87f3e5a..bc1c48d35cb3 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -115,7 +115,7 @@ static void sched_switch_user(struct task_struct *p) { }
 
 #if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
 
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
 
 static inline void uids_mutex_lock(void)
@@ -128,86 +128,83 @@ static inline void uids_mutex_unlock(void)
 	mutex_unlock(&uids_mutex);
 }
 
-/* return cpu shares held by the user */
-static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+/* uid directory attributes */
+static ssize_t cpu_shares_show(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       char *buf)
 {
-	struct user_struct *up = container_of(kset, struct user_struct, kset);
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
 
-	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
 }
 
-/* modify cpu shares held by the user */
-static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
-				size_t size)
+static ssize_t cpu_shares_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t size)
 {
-	struct user_struct *up = container_of(kset, struct user_struct, kset);
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
 	unsigned long shares;
 	int rc;
 
-	sscanf(buffer, "%lu", &shares);
+	sscanf(buf, "%lu", &shares);
 
 	rc = sched_group_set_shares(up->tg, shares);
 
 	return (rc ? rc : size);
 }
 
-static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+static struct kobj_attribute cpu_share_attr =
+	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+
+/* default attributes per uid directory */
+static struct attribute *uids_attributes[] = {
+	&cpu_share_attr.attr,
+	NULL
+};
+
+/* the lifetime of user_struct is not managed by the core (now) */
+static void uids_release(struct kobject *kobj)
 {
-	sa->attr.name = name;
-	sa->attr.mode = mode;
-	sa->show = cpu_shares_show;
-	sa->store = cpu_shares_store;
+	return;
 }
 
-/* Create "/sys/kernel/uids/<uid>" directory and
- * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
- */
-static int user_kobject_create(struct user_struct *up)
+static struct kobj_type uids_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = uids_attributes,
+	.release = uids_release,
+};
+
+/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+static int uids_user_create(struct user_struct *up)
 {
-	struct kset *kset = &up->kset;
-	struct kobject *kobj = &kset->kobj;
+	struct kobject *kobj = &up->kobj;
 	int error;
 
-	memset(kset, 0, sizeof(struct kset));
-	kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
-	kobject_set_name(kobj, "%d", up->uid);
-	kset_init(kset);
-	user_attr_init(&up->user_attr, "cpu_share", 0644);
-
-	error = kobject_add(kobj);
-	if (error)
+	memset(kobj, 0, sizeof(struct kobject));
+	kobj->kset = uids_kset;
+	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+	if (error) {
+		kobject_put(kobj);
 		goto done;
-
-	error = sysfs_create_file(kobj, &up->user_attr.attr);
-	if (error)
-		kobject_del(kobj);
+	}
 
 	kobject_uevent(kobj, KOBJ_ADD);
-
 done:
 	return error;
 }
 
-/* create these in sysfs filesystem:
+/* create these entries in sysfs:
  * "/sys/kernel/uids" directory
  * "/sys/kernel/uids/0" directory (for root user)
  * "/sys/kernel/uids/0/cpu_share" file (for root user)
  */
-int __init uids_kobject_init(void)
+int __init uids_sysfs_init(void)
 {
-	int error;
-
-	/* create under /sys/kernel dir */
-	uids_kobject.parent = &kernel_subsys.kobj;
-	uids_kobject.kset = &kernel_subsys;
-	kobject_set_name(&uids_kobject, "uids");
-	kobject_init(&uids_kobject);
+	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
+	if (!uids_kset)
+		return -ENOMEM;
 
-	error = kobject_add(&uids_kobject);
-	if (!error)
-		error = user_kobject_create(&root_user);
-
-	return error;
+	return uids_user_create(&root_user);
 }
 
 /* work function to remove sysfs directory for a user and free up
@@ -216,7 +213,6 @@ int __init uids_kobject_init(void)
 static void remove_user_sysfs_dir(struct work_struct *w)
 {
 	struct user_struct *up = container_of(w, struct user_struct, work);
-	struct kobject *kobj = &up->kset.kobj;
 	unsigned long flags;
 	int remove_user = 0;
 
@@ -238,9 +234,9 @@ static void remove_user_sysfs_dir(struct work_struct *w)
 	if (!remove_user)
 		goto done;
 
-	sysfs_remove_file(kobj, &up->user_attr.attr);
-	kobject_uevent(kobj, KOBJ_REMOVE);
-	kobject_del(kobj);
+	kobject_uevent(&up->kobj, KOBJ_REMOVE);
+	kobject_del(&up->kobj);
+	kobject_put(&up->kobj);
 
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
@@ -267,7 +263,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 
 #else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
 
-static inline int user_kobject_create(struct user_struct *up) { return 0; }
+int uids_sysfs_init(void) { return 0; }
+static inline int uids_user_create(struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
 static inline void uids_mutex_unlock(void) { }
 
@@ -322,9 +319,9 @@ void free_uid(struct user_struct *up)
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
 	struct hlist_head *hashent = uidhashentry(ns, uid);
-	struct user_struct *up;
+	struct user_struct *up, *new;
 
-	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
 	 * atomic.
 	 */
 	uids_mutex_lock();
@@ -334,13 +331,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new) {
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (!new)
+			goto out_unlock;
 
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
@@ -356,28 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 #endif
 		new->locked_shm = 0;
 
-		if (alloc_uid_keyring(new, current) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (alloc_uid_keyring(new, current) < 0)
+			goto out_free_user;
 
-		if (sched_create_user(new) < 0) {
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (sched_create_user(new) < 0)
+			goto out_put_keys;
 
-		if (user_kobject_create(new)) {
-			sched_destroy_user(new);
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (uids_user_create(new))
+			goto out_destoy_sched;
 
 		/*
 		 * Before adding this, check whether we raced
@@ -405,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	uids_mutex_unlock();
 
 	return up;
+
+out_destoy_sched:
+	sched_destroy_user(new);
+out_put_keys:
+	key_put(new->uid_keyring);
+	key_put(new->session_keyring);
+out_free_user:
+	kmem_cache_free(uid_cachep, new);
+out_unlock:
+	uids_mutex_unlock();
+	return NULL;
 }
 
 void switch_uid(struct user_struct *new_user)
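The kernel/user.c side of this merge is a conversion from the old open-coded kset/subsys_attribute setup to the newer kobject API: a kset created with kset_create_and_add() backs the /sys/kernel/uids/ directory, and each per-uid kobject is registered with kobject_init_and_add() against a kobj_type that carries its default attributes and release handler. The sketch below shows that same pattern in isolation; it is not part of the commit, all example_* names and the "demo" entry are invented for illustration, and .default_attrs reflects the kobj_type layout of this era (recent kernels use .default_groups instead).

/*
 * Minimal sketch of the kset + kobject_init_and_add() pattern, assuming a
 * hypothetical "example" subsystem under /sys/kernel/.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

static struct kset *example_kset;	/* backs the /sys/kernel/example/ directory */

struct example_obj {
	struct kobject kobj;
	unsigned long value;
};

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

	return sprintf(buf, "%lu\n", obj->value);
}

static struct kobj_attribute value_attr = __ATTR_RO(value);

static struct attribute *example_attrs[] = {
	&value_attr.attr,
	NULL
};

/* called by kobject_put() once the last reference is dropped */
static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= example_attrs,	/* 2.6.25-era field; newer kernels use .default_groups */
	.release	= example_release,
};

static int __init example_init(void)
{
	struct example_obj *obj;
	int error;

	/* creates /sys/kernel/example/, as uids_sysfs_init() does for "uids" */
	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
	if (!example_kset)
		return -ENOMEM;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		kset_unregister(example_kset);
		return -ENOMEM;
	}

	/* parent via ->kset, then init + add in one call, as uids_user_create() does */
	obj->kobj.kset = example_kset;
	error = kobject_init_and_add(&obj->kobj, &example_ktype, NULL, "%s", "demo");
	if (error) {
		kobject_put(&obj->kobj);	/* ->release() frees obj */
		kset_unregister(example_kset);
		return error;
	}

	kobject_uevent(&obj->kobj, KOBJ_ADD);
	return 0;
}

One consequence of this API: once kobject_init_and_add() has been called, cleanup has to go through kobject_put() so the ktype's release handler runs, which matches the error path added in the new uids_user_create().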