diff options
| author | Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> | 2007-10-15 11:00:09 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 11:00:09 -0400 |
| commit | 24e377a83220ef05c9b5bec7e01d65eed6609aa6 (patch) | |
| tree | 9303b3d9f91ee39517d379aaac06c0432be8a9b8 /kernel/user.c | |
| parent | 9b5b77512dce239fa168183fa71896712232e95a (diff) | |
sched: add fair-user scheduler
Enable user-id based fair group scheduling. This is useful for anyone
who wants to test the group scheduler w/o having to enable
CONFIG_CGROUPS.
A separate scheduling group (i.e. struct task_grp) is automatically created for
every new user added to the system. Upon uid change for a task, it is made to
move to the corresponding scheduling group.
A /proc tunable (/proc/root_user_share) is also provided to tune root
user's quota of cpu bandwidth.
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/user.c')
| -rw-r--r-- | kernel/user.c | 43 |
1 files changed, 43 insertions, 0 deletions
diff --git a/kernel/user.c b/kernel/user.c index 9ca2848fc356..c6387fac932d 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -50,8 +50,41 @@ struct user_struct root_user = { | |||
| 50 | .uid_keyring = &root_user_keyring, | 50 | .uid_keyring = &root_user_keyring, |
| 51 | .session_keyring = &root_session_keyring, | 51 | .session_keyring = &root_session_keyring, |
| 52 | #endif | 52 | #endif |
| 53 | #ifdef CONFIG_FAIR_USER_SCHED | ||
| 54 | .tg = &init_task_grp, | ||
| 55 | #endif | ||
| 53 | }; | 56 | }; |
| 54 | 57 | ||
| 58 | #ifdef CONFIG_FAIR_USER_SCHED | ||
| 59 | static void sched_destroy_user(struct user_struct *up) | ||
| 60 | { | ||
| 61 | sched_destroy_group(up->tg); | ||
| 62 | } | ||
| 63 | |||
| 64 | static int sched_create_user(struct user_struct *up) | ||
| 65 | { | ||
| 66 | int rc = 0; | ||
| 67 | |||
| 68 | up->tg = sched_create_group(); | ||
| 69 | if (IS_ERR(up->tg)) | ||
| 70 | rc = -ENOMEM; | ||
| 71 | |||
| 72 | return rc; | ||
| 73 | } | ||
| 74 | |||
| 75 | static void sched_switch_user(struct task_struct *p) | ||
| 76 | { | ||
| 77 | sched_move_task(p); | ||
| 78 | } | ||
| 79 | |||
| 80 | #else /* CONFIG_FAIR_USER_SCHED */ | ||
| 81 | |||
| 82 | static void sched_destroy_user(struct user_struct *up) { } | ||
| 83 | static int sched_create_user(struct user_struct *up) { return 0; } | ||
| 84 | static void sched_switch_user(struct task_struct *p) { } | ||
| 85 | |||
| 86 | #endif /* CONFIG_FAIR_USER_SCHED */ | ||
| 87 | |||
| 55 | /* | 88 | /* |
| 56 | * These routines must be called with the uidhash spinlock held! | 89 | * These routines must be called with the uidhash spinlock held! |
| 57 | */ | 90 | */ |
| @@ -109,6 +142,7 @@ void free_uid(struct user_struct *up) | |||
| 109 | if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) { | 142 | if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) { |
| 110 | uid_hash_remove(up); | 143 | uid_hash_remove(up); |
| 111 | spin_unlock_irqrestore(&uidhash_lock, flags); | 144 | spin_unlock_irqrestore(&uidhash_lock, flags); |
| 145 | sched_destroy_user(up); | ||
| 112 | key_put(up->uid_keyring); | 146 | key_put(up->uid_keyring); |
| 113 | key_put(up->session_keyring); | 147 | key_put(up->session_keyring); |
| 114 | kmem_cache_free(uid_cachep, up); | 148 | kmem_cache_free(uid_cachep, up); |
| @@ -150,6 +184,13 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) | |||
| 150 | return NULL; | 184 | return NULL; |
| 151 | } | 185 | } |
| 152 | 186 | ||
| 187 | if (sched_create_user(new) < 0) { | ||
| 188 | key_put(new->uid_keyring); | ||
| 189 | key_put(new->session_keyring); | ||
| 190 | kmem_cache_free(uid_cachep, new); | ||
| 191 | return NULL; | ||
| 192 | } | ||
| 193 | |||
| 153 | /* | 194 | /* |
| 154 | * Before adding this, check whether we raced | 195 | * Before adding this, check whether we raced |
| 155 | * on adding the same user already.. | 196 | * on adding the same user already.. |
| @@ -157,6 +198,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) | |||
| 157 | spin_lock_irq(&uidhash_lock); | 198 | spin_lock_irq(&uidhash_lock); |
| 158 | up = uid_hash_find(uid, hashent); | 199 | up = uid_hash_find(uid, hashent); |
| 159 | if (up) { | 200 | if (up) { |
| 201 | sched_destroy_user(new); | ||
| 160 | key_put(new->uid_keyring); | 202 | key_put(new->uid_keyring); |
| 161 | key_put(new->session_keyring); | 203 | key_put(new->session_keyring); |
| 162 | kmem_cache_free(uid_cachep, new); | 204 | kmem_cache_free(uid_cachep, new); |
| @@ -184,6 +226,7 @@ void switch_uid(struct user_struct *new_user) | |||
| 184 | atomic_dec(&old_user->processes); | 226 | atomic_dec(&old_user->processes); |
| 185 | switch_uid_keyring(new_user); | 227 | switch_uid_keyring(new_user); |
| 186 | current->user = new_user; | 228 | current->user = new_user; |
| 229 | sched_switch_user(current); | ||
| 187 | 230 | ||
| 188 | /* | 231 | /* |
| 189 | * We need to synchronize with __sigqueue_alloc() | 232 | * We need to synchronize with __sigqueue_alloc() |
