Diffstat (limited to 'kernel/user.c')
-rw-r--r--	kernel/user.c	316
1 file changed, 0 insertions, 316 deletions
diff --git a/kernel/user.c b/kernel/user.c
index 46d0165ca70c..7e72614b736d 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,7 +16,6 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/user_namespace.h>
-#include "cred-internals.h"
 
 struct user_namespace init_user_ns = {
 	.kref = {
@@ -56,9 +55,6 @@ struct user_struct root_user = {
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
 	.user_ns	= &init_user_ns,
-#ifdef CONFIG_USER_SCHED
-	.tg		= &init_task_group,
-#endif
 };
 
 /*
@@ -75,268 +71,6 @@ static void uid_hash_remove(struct user_struct *up)
 	put_user_ns(up->user_ns);
 }
 
-#ifdef CONFIG_USER_SCHED
-
-static void sched_destroy_user(struct user_struct *up)
-{
-	sched_destroy_group(up->tg);
-}
-
-static int sched_create_user(struct user_struct *up)
-{
-	int rc = 0;
-
-	up->tg = sched_create_group(&root_task_group);
-	if (IS_ERR(up->tg))
-		rc = -ENOMEM;
-
-	set_tg_uid(up);
-
-	return rc;
-}
-
-#else	/* CONFIG_USER_SCHED */
-
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-
-#endif	/* CONFIG_USER_SCHED */
-
-#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
-
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
-	struct user_struct *user;
-	struct hlist_node *h;
-
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
-			/* possibly resurrect an "almost deleted" object */
-			if (atomic_inc_return(&user->__count) == 1)
-				cancel_delayed_work(&user->work);
-			return user;
-		}
-	}
-
-	return NULL;
-}
-
-static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
-static DEFINE_MUTEX(uids_mutex);
-
-static inline void uids_mutex_lock(void)
-{
-	mutex_lock(&uids_mutex);
-}
-
-static inline void uids_mutex_unlock(void)
-{
-	mutex_unlock(&uids_mutex);
-}
-
-/* uid directory attributes */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static ssize_t cpu_shares_show(struct kobject *kobj,
-			       struct kobj_attribute *attr,
-			       char *buf)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
-	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
-}
-
-static ssize_t cpu_shares_store(struct kobject *kobj,
-				struct kobj_attribute *attr,
-				const char *buf, size_t size)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-	unsigned long shares;
-	int rc;
-
-	sscanf(buf, "%lu", &shares);
-
-	rc = sched_group_set_shares(up->tg, shares);
-
-	return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_share_attr =
-	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
-				   struct kobj_attribute *attr,
-				   char *buf)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
-	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
-}
-
-static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
-				    struct kobj_attribute *attr,
-				    const char *buf, size_t size)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-	unsigned long rt_runtime;
-	int rc;
-
-	sscanf(buf, "%ld", &rt_runtime);
-
-	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
-
-	return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_rt_runtime_attr =
-	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
-
-static ssize_t cpu_rt_period_show(struct kobject *kobj,
-				  struct kobj_attribute *attr,
-				  char *buf)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
-	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
-}
-
-static ssize_t cpu_rt_period_store(struct kobject *kobj,
-				   struct kobj_attribute *attr,
-				   const char *buf, size_t size)
-{
-	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-	unsigned long rt_period;
-	int rc;
-
-	sscanf(buf, "%lu", &rt_period);
-
-	rc = sched_group_set_rt_period(up->tg, rt_period);
-
-	return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_rt_period_attr =
-	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
-#endif
-
-/* default attributes per uid directory */
-static struct attribute *uids_attributes[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	&cpu_share_attr.attr,
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	&cpu_rt_runtime_attr.attr,
-	&cpu_rt_period_attr.attr,
-#endif
-	NULL
-};
-
-/* the lifetime of user_struct is not managed by the core (now) */
-static void uids_release(struct kobject *kobj)
-{
-	return;
-}
-
-static struct kobj_type uids_ktype = {
-	.sysfs_ops = &kobj_sysfs_ops,
-	.default_attrs = uids_attributes,
-	.release = uids_release,
-};
-
-/*
- * Create /sys/kernel/uids/<uid>/cpu_share file for this user
- * We do not create this file for users in a user namespace (until
- * sysfs tagging is implemented).
- *
- * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
- */
-static int uids_user_create(struct user_struct *up)
-{
-	struct kobject *kobj = &up->kobj;
-	int error;
-
-	memset(kobj, 0, sizeof(struct kobject));
-	if (up->user_ns != &init_user_ns)
-		return 0;
-	kobj->kset = uids_kset;
-	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
-	if (error) {
-		kobject_put(kobj);
-		goto done;
-	}
-
-	kobject_uevent(kobj, KOBJ_ADD);
-done:
-	return error;
-}
-
-/* create these entries in sysfs:
- *	"/sys/kernel/uids" directory
- *	"/sys/kernel/uids/0" directory (for root user)
- *	"/sys/kernel/uids/0/cpu_share" file (for root user)
- */
-int __init uids_sysfs_init(void)
-{
-	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
-	if (!uids_kset)
-		return -ENOMEM;
-
-	return uids_user_create(&root_user);
-}
-
-/* delayed work function to remove sysfs directory for a user and free up
- * corresponding structures.
- */
-static void cleanup_user_struct(struct work_struct *w)
-{
-	struct user_struct *up = container_of(w, struct user_struct, work.work);
-	unsigned long flags;
-	int remove_user = 0;
-
-	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
-	 * atomic.
-	 */
-	uids_mutex_lock();
-
-	spin_lock_irqsave(&uidhash_lock, flags);
-	if (atomic_read(&up->__count) == 0) {
-		uid_hash_remove(up);
-		remove_user = 1;
-	}
-	spin_unlock_irqrestore(&uidhash_lock, flags);
-
-	if (!remove_user)
-		goto done;
-
-	if (up->user_ns == &init_user_ns) {
-		kobject_uevent(&up->kobj, KOBJ_REMOVE);
-		kobject_del(&up->kobj);
-		kobject_put(&up->kobj);
-	}
-
-	sched_destroy_user(up);
-	key_put(up->uid_keyring);
-	key_put(up->session_keyring);
-	kmem_cache_free(uid_cachep, up);
-
-done:
-	uids_mutex_unlock();
-}
-
-/* IRQs are disabled and uidhash_lock is held upon function entry.
- * IRQ state (as stored in flags) is restored and uidhash_lock released
- * upon function exit.
- */
-static void free_user(struct user_struct *up, unsigned long flags)
-{
-	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
-	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
-	spin_unlock_irqrestore(&uidhash_lock, flags);
-}
-
-#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
-
 static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
@@ -352,11 +86,6 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 	return NULL;
 }
 
-int uids_sysfs_init(void) { return 0; }
-static inline int uids_user_create(struct user_struct *up) { return 0; }
-static inline void uids_mutex_lock(void) { }
-static inline void uids_mutex_unlock(void) { }
-
 /* IRQs are disabled and uidhash_lock is held upon function entry.
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
@@ -365,32 +94,11 @@ static void free_user(struct user_struct *up, unsigned long flags)
 {
 	uid_hash_remove(up);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
-	sched_destroy_user(up);
 	key_put(up->uid_keyring);
 	key_put(up->session_keyring);
 	kmem_cache_free(uid_cachep, up);
 }
 
-#endif
-
-#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
-/*
- * We need to check if a setuid can take place. This function should be called
- * before successfully completing the setuid.
- */
-int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
-{
-
-	return sched_rt_can_attach(up->tg, tsk);
-
-}
-#else
-int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
-{
-	return 1;
-}
-#endif
-
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().
@@ -428,11 +136,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up, *new;
 
-	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
-	 * atomic.
-	 */
-	uids_mutex_lock();
-
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	spin_unlock_irq(&uidhash_lock);
@@ -445,14 +148,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 
-		if (sched_create_user(new) < 0)
-			goto out_free_user;
-
 		new->user_ns = get_user_ns(ns);
 
-		if (uids_user_create(new))
-			goto out_destoy_sched;
-
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -460,11 +157,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
-			/* This case is not possible when CONFIG_USER_SCHED
-			 * is defined, since we serialize alloc_uid() using
-			 * uids_mutex. Hence no need to call
-			 * sched_destroy_user() or remove_user_sysfs_dir().
-			 */
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -475,17 +167,9 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_unlock_irq(&uidhash_lock);
 	}
 
-	uids_mutex_unlock();
-
 	return up;
 
-out_destoy_sched:
-	sched_destroy_user(new);
-	put_user_ns(new->user_ns);
-out_free_user:
-	kmem_cache_free(uid_cachep, new);
 out_unlock:
-	uids_mutex_unlock();
 	return NULL;
 }
 
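For orientation, the free_user() retained by this patch is reached from free_uid(), which this diff does not touch. The sketch below shows roughly what that caller looks like in kernel/user.c of this era; it is reconstructed from the surrounding file rather than taken from this patch, so treat the exact details as an approximation. It explains the comment kept in the hunk above: atomic_dec_and_lock() acquires uidhash_lock only when the count reaches zero, so free_user() is entered with IRQs disabled and the lock held, and is responsible for releasing both.

	/* Sketch (not part of this diff): dropping the last reference. */
	void free_uid(struct user_struct *up)
	{
		unsigned long flags;

		if (!up)
			return;

		local_irq_save(flags);
		if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
			free_user(up, flags);	/* unlocks uidhash_lock, restores IRQ state */
		else
			local_irq_restore(flags);
	}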
