author     Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-02-17 21:51:42 -0500
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-02-17 21:51:42 -0500
commit     c58310bf4933986513020fa90b4190c7492995ae (patch)
tree       143f2c7578d02ebef5db8fc57ae69e951ae0e2ee /kernel/user.c
parent     269cdfaf769f5cd831284cc831790c7c5038040f (diff)
parent     1309d4e68497184d2fd87e892ddf14076c2bda98 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
Diffstat (limited to 'kernel/user.c')
 -rw-r--r--  kernel/user.c | 60
 1 file changed, 53 insertions, 7 deletions
diff --git a/kernel/user.c b/kernel/user.c
index bc1c48d35cb3..7132022a040c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -17,6 +17,14 @@
 #include <linux/module.h>
 #include <linux/user_namespace.h>
 
+struct user_namespace init_user_ns = {
+	.kref = {
+		.refcount	= ATOMIC_INIT(2),
+	},
+	.root_user = &root_user,
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
+
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
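
Note: the hunk above defines and exports init_user_ns with its embedded kref preinitialised. As a rough orientation for how such a kref is normally taken and dropped, here is a minimal sketch following the standard kref_get()/kref_put() pattern; the names carrying a _sketch suffix and the release callback body are illustrative assumptions, not code from this commit.

/* Illustrative sketch only (not part of this commit). */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>

static void release_user_ns_sketch(struct kref *kref)
{
	struct user_namespace *ns =
		container_of(kref, struct user_namespace, kref);

	kfree(ns);	/* init_user_ns itself is static, so its count never drops to 0 */
}

static inline struct user_namespace *get_user_ns_sketch(struct user_namespace *ns)
{
	if (ns)
		kref_get(&ns->kref);	/* take an additional reference */
	return ns;
}

static inline void put_user_ns_sketch(struct user_namespace *ns)
{
	if (ns)
		kref_put(&ns->kref, release_user_ns_sketch);	/* drop a reference */
}
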
@@ -49,7 +57,7 @@ struct user_struct root_user = {
 	.uid_keyring	= &root_user_keyring,
 	.session_keyring = &root_session_keyring,
 #endif
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
 };
@@ -82,7 +90,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 	return NULL;
 }
 
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 
 static void sched_destroy_user(struct user_struct *up)
 {
@@ -105,15 +113,15 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }
 
-#else /* CONFIG_FAIR_USER_SCHED */
+#else /* CONFIG_USER_SCHED */
 
 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
 static void sched_switch_user(struct task_struct *p) { }
 
-#endif /* CONFIG_FAIR_USER_SCHED */
+#endif /* CONFIG_USER_SCHED */
 
-#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
 
 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
@@ -129,6 +137,7 @@ static inline void uids_mutex_unlock(void)
 }
 
 /* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static ssize_t cpu_shares_show(struct kobject *kobj,
 			       struct kobj_attribute *attr,
 			       char *buf)
@@ -155,10 +164,45 @@ static ssize_t cpu_shares_store(struct kobject *kobj,
 
 static struct kobj_attribute cpu_share_attr =
 	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   char *buf)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+	return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
+}
+
+static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+	unsigned long rt_runtime;
+	int rc;
+
+	sscanf(buf, "%lu", &rt_runtime);
+
+	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
+
+	return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_runtime_attr =
+	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+#endif
 
 /* default attributes per uid directory */
 static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	&cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	&cpu_rt_runtime_attr.attr,
+#endif
 	NULL
 };
 
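Note: the new cpu_rt_runtime attribute (and the existing cpu_share one) only appear in sysfs once the uids_attributes array is bound to each per-uid kobject. That wiring sits outside this hunk; the sketch below shows the generic sysfs pattern such code typically uses, with the _sketch names being placeholders rather than this file's actual helpers.

/* Sketch of the standard sysfs wiring (assumed, not shown in this hunk):
 * wrap the attribute array in an attribute_group and create it on the
 * per-user kobject, so the files show up as
 * /sys/kernel/uids/<uid>/cpu_share and /sys/kernel/uids/<uid>/cpu_rt_runtime. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute_group uids_attr_group_sketch = {
	.attrs = uids_attributes,
};

static int uids_expose_sketch(struct user_struct *up)
{
	/* up->kobj is assumed to already live under the uids_kset
	 * (/sys/kernel/uids) created by uids_sysfs_init(). */
	return sysfs_create_group(&up->kobj, &uids_attr_group_sketch);
}

With CONFIG_RT_GROUP_SCHED enabled, a write to cpu_rt_runtime then lands in cpu_rt_runtime_store() above and is forwarded to sched_group_set_rt_runtime() for that user's task group.
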
@@ -261,7 +305,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	schedule_work(&up->work);
 }
 
-#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
+#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
 
 int uids_sysfs_init(void) { return 0; }
 static inline int uids_user_create(struct user_struct *up) { return 0; }
@@ -365,7 +409,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
-		/* This case is not possible when CONFIG_FAIR_USER_SCHED
+		/* This case is not possible when CONFIG_USER_SCHED
 		 * is defined, since we serialize alloc_uid() using
 		 * uids_mutex. Hence no need to call
 		 * sched_destroy_user() or remove_user_sysfs_dir().
@@ -427,6 +471,7 @@ void switch_uid(struct user_struct *new_user)
 		suid_keys(current);
 }
 
+#ifdef CONFIG_USER_NS
 void release_uids(struct user_namespace *ns)
 {
 	int i;
@@ -451,6 +496,7 @@ void release_uids(struct user_namespace *ns)
 
 	free_uid(ns->root_user);
 }
+#endif
 
 static int __init uid_cache_init(void)
 {
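
Note: the last two hunks compile release_uids() only under CONFIG_USER_NS, since its callers live in the user-namespace code. Purely as a hedged sketch of the conventional companion a header would carry if generic code still had to call it with the option off (this commit does not add such a stub):

/* Hypothetical stub, not part of this commit: keeps callers compiling
 * when CONFIG_USER_NS is disabled and release_uids() is not built. */
#ifndef CONFIG_USER_NS
static inline void release_uids(struct user_namespace *ns) { }
#endif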