author	Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2007-10-19 02:41:03 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 14:53:51 -0400
commit	68318b8e0b61f98f0be833cc862ab6dee69348b4 (patch)
tree	cb48f82c73ff2204754ff3d5955a0073ca38c383 /kernel
parent	fb391599f2eaf22197e3e914187c957ef7eeb4c5 (diff)
Hook up group scheduler with control groups
Enable "cgroup" (formerly containers) based fair group scheduling. This will let administrator create arbitrary groups of tasks (using "cgroup" pseudo filesystem) and control their cpu bandwidth usage. [akpm@linux-foundation.org: fix cpp condition] Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com> Cc: Randy Dunlap <randy.dunlap@oracle.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Paul Menage <menage@google.com> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4071306e1088..afe76ec2e7fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -155,10 +155,15 @@ struct rt_prio_array {
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
+#include <linux/cgroup.h>
+
 struct cfs_rq;
 
 /* task group related information */
 struct task_group {
+#ifdef CONFIG_FAIR_CGROUP_SCHED
+	struct cgroup_subsys_state css;
+#endif
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
 	/* runqueue "owned" by this group on each cpu */
@@ -199,6 +204,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 
 #ifdef CONFIG_FAIR_USER_SCHED
 	tg = p->user->tg;
+#elif defined(CONFIG_FAIR_CGROUP_SCHED)
+	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
+				struct task_group, css);
 #else
 	tg = &init_task_group;
 #endif
@@ -7091,3 +7099,116 @@ unsigned long sched_group_shares(struct task_group *tg)
 }
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
+
+#ifdef CONFIG_FAIR_CGROUP_SCHED
+
+/* return corresponding task_group object of a cgroup */
+static inline struct task_group *cgroup_tg(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id),
+			    struct task_group, css);
+}
+
+static struct cgroup_subsys_state *
+cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	struct task_group *tg;
+
+	if (!cont->parent) {
+		/* This is early initialization for the top cgroup */
+		init_task_group.css.cgroup = cont;
+		return &init_task_group.css;
+	}
+
+	/* we support only 1-level deep hierarchical scheduler atm */
+	if (cont->parent->parent)
+		return ERR_PTR(-EINVAL);
+
+	tg = sched_create_group();
+	if (IS_ERR(tg))
+		return ERR_PTR(-ENOMEM);
+
+	/* Bind the cgroup to task_group object we just created */
+	tg->css.cgroup = cont;
+
+	return &tg->css;
+}
+
+static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
+			       struct cgroup *cont)
+{
+	struct task_group *tg = cgroup_tg(cont);
+
+	sched_destroy_group(tg);
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
+				 struct cgroup *cont, struct task_struct *tsk)
+{
+	/* We don't support RT-tasks being in separate groups */
+	if (tsk->sched_class != &fair_sched_class)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void
+cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+		  struct cgroup *old_cont, struct task_struct *tsk)
+{
+	sched_move_task(tsk);
+}
+
+static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype,
+				struct file *file, const char __user *userbuf,
+				size_t nbytes, loff_t *ppos)
+{
+	unsigned long shareval;
+	struct task_group *tg = cgroup_tg(cont);
+	char buffer[2 * sizeof(unsigned long) + 1];
+	int rc;
+
+	if (nbytes > 2 * sizeof(unsigned long))	/* safety check */
+		return -E2BIG;
+
+	if (copy_from_user(buffer, userbuf, nbytes))
+		return -EFAULT;
+
+	buffer[nbytes] = 0;	/* nul-terminate */
+	shareval = simple_strtoul(buffer, NULL, 10);
+
+	rc = sched_group_set_shares(tg, shareval);
+
+	return (rc < 0 ? rc : nbytes);
+}
+
+static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft)
+{
+	struct task_group *tg = cgroup_tg(cont);
+
+	return (u64) tg->shares;
+}
+
+static struct cftype cpu_shares = {
+	.name = "shares",
+	.read_uint = cpu_shares_read_uint,
+	.write = cpu_shares_write,
+};
+
+static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	return cgroup_add_file(cont, ss, &cpu_shares);
+}
+
+struct cgroup_subsys cpu_cgroup_subsys = {
+	.name		= "cpu",
+	.create		= cpu_cgroup_create,
+	.destroy	= cpu_cgroup_destroy,
+	.can_attach	= cpu_cgroup_can_attach,
+	.attach		= cpu_cgroup_attach,
+	.populate	= cpu_cgroup_populate,
+	.subsys_id	= cpu_cgroup_subsys_id,
+	.early_init	= 1,
+};
+
+#endif	/* CONFIG_FAIR_CGROUP_SCHED */
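Usage note, continuing the hypothetical /cgroup/browsers example above: tasks are moved into a group by writing their pid to the cgroup core's per-group "tasks" file, which invokes cpu_cgroup_can_attach() and then cpu_cgroup_attach() from this patch; a real-time task (SCHED_FIFO/SCHED_RR) is refused with -EINVAL, since only the fair class is grouped here. A minimal sketch:

/* Sketch: move a pid into the example group. A write the scheduler
 * refuses (e.g. for an RT task) fails with errno EINVAL. */
#include <stdio.h>
#include <sys/types.h>

static int move_task(pid_t pid)
{
	FILE *f = fopen("/cgroup/browsers/tasks", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", (int)pid);
	return fclose(f);	/* error, if any, surfaces on close */
}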