Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 85
1 file changed, 53 insertions(+), 32 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b7399dfa361..f9c8da798bbf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7438,10 +7438,11 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
-		struct cfs_rq *cfs_rq, struct sched_entity *se,
-		int cpu, int add)
+static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+		struct sched_entity *se, int cpu, int add,
+		struct sched_entity *parent)
 {
+	struct rq *rq = cpu_rq(cpu);
 	tg->cfs_rq[cpu] = cfs_rq;
 	init_cfs_rq(cfs_rq, rq);
 	cfs_rq->tg = tg;
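The first hunk changes the constructor's signature: callers no longer pass a struct rq * alongside the CPU number; the function derives its run queue itself via cpu_rq(cpu), and a parent sched_entity is threaded through instead. A minimal userspace sketch of that derive-from-index pattern (stand-in types and names, not kernel code):

/* Sketch: callers pass only the CPU index and the callee resolves its
 * per-CPU run queue, so call sites no longer need a struct rq * in scope. */
#include <stdio.h>

#define NR_CPUS 4

struct rq { int cpu; };

static struct rq runqueues[NR_CPUS];

static struct rq *cpu_rq(int cpu)
{
	return &runqueues[cpu];   /* the kernel uses a per-CPU variable here */
}

static void init_entry(int cpu)
{
	struct rq *rq = cpu_rq(cpu);   /* derived, not passed in */
	printf("init on cpu %d\n", rq->cpu);
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++) {
		runqueues[i].cpu = i;
		init_entry(i);
	}
	return 0;
}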
@@ -7453,19 +7454,25 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
 	if (!se)
 		return;
 
-	se->cfs_rq = &rq->cfs;
+	if (!parent)
+		se->cfs_rq = &rq->cfs;
+	else
+		se->cfs_rq = parent->my_q;
+
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
 	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
-	se->parent = NULL;
+	se->parent = parent;
 }
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
-		struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
-		int cpu, int add)
+static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+		struct sched_rt_entity *rt_se, int cpu, int add,
+		struct sched_rt_entity *parent)
 {
+	struct rq *rq = cpu_rq(cpu);
+
 	tg->rt_rq[cpu] = rt_rq;
 	init_rt_rq(rt_rq, rq);
 	rt_rq->tg = tg;
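The heart of the change: when a parent entity exists, the group's per-CPU entity is enqueued on the parent's private queue (parent->my_q) rather than on the root rq->cfs, and se->parent now records the link. That is what turns the flat group scheduler into a real hierarchy. A minimal sketch of the linkage, with simplified stand-in types (the real cfs_rq and sched_entity carry far more state):

/* Sketch: each group entity queues on its parent's private run queue,
 * so walking ->parent climbs the group hierarchy up to the root. */
#include <stdio.h>

struct cfs_rq;
struct sched_entity {
	struct cfs_rq *cfs_rq;        /* queue this entity runs on */
	struct cfs_rq *my_q;          /* queue this entity owns (its group's) */
	struct sched_entity *parent;
	const char *name;
};
struct cfs_rq { const char *name; };

static void link(struct sched_entity *se, struct cfs_rq *root,
		 struct sched_entity *parent)
{
	se->cfs_rq = parent ? parent->my_q : root;   /* mirrors the patch */
	se->parent = parent;
}

int main(void)
{
	struct cfs_rq root = { "rq->cfs" }, aq = { "A->my_q" };
	struct sched_entity a = { .my_q = &aq, .name = "A" };
	struct sched_entity b = { .my_q = NULL, .name = "B" };

	link(&a, &root, NULL);   /* top-level group: queued on rq->cfs */
	link(&b, &root, &a);     /* child group: queued on A's own queue */

	for (struct sched_entity *se = &b; se; se = se->parent)
		printf("%s runs on %s\n", se->name, se->cfs_rq->name);
	return 0;
}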
@@ -7478,9 +7485,14 @@ static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
 	if (!rt_se)
 		return;
 
+	if (!parent)
+		rt_se->rt_rq = &rq->rt;
+	else
+		rt_se->rt_rq = parent->my_q;
+
 	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
-	rt_se->parent = NULL;
+	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
 }
 #endif
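One thing worth flagging in the RT hunk as rendered here: the pre-existing unconditional assignment rt_se->rt_rq = &rq->rt; survives as a context line immediately after the newly added if/else, so it appears to overwrite the parent-aware value just computed. The CFS hunk, by contrast, replaces the old assignment outright. Whether the stray RT line is intentional or an oversight for a follow-up cleanup is worth verifying against the final tree.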
@@ -7568,7 +7580,7 @@ void __init sched_init(void)
 		 * We achieve this by letting init_task_group's tasks sit
 		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(rq, &init_task_group, &rq->cfs, NULL, i, 1);
+		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
 		/*
 		 * In case of task-groups formed thr' the user id of tasks,
@@ -7581,9 +7593,9 @@ void __init sched_init(void)
 		 * (init_cfs_rq) and having one entity represent this group of
 		 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
 		 */
-		init_tg_cfs_entry(rq, &init_task_group,
+		init_tg_cfs_entry(&init_task_group,
 				&per_cpu(init_cfs_rq, i),
-				&per_cpu(init_sched_entity, i), i, 1);
+				&per_cpu(init_sched_entity, i), i, 1, NULL);
 
 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -7592,11 +7604,11 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
 #ifdef CONFIG_CGROUP_SCHED
-		init_tg_rt_entry(rq, &init_task_group, &rq->rt, NULL, i, 1);
+		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
-		init_tg_rt_entry(rq, &init_task_group,
+		init_tg_rt_entry(&init_task_group,
 				&per_cpu(init_rt_rq, i),
-				&per_cpu(init_sched_rt_entity, i), i, 1);
+				&per_cpu(init_sched_rt_entity, i), i, 1, NULL);
 #endif
 #endif
 
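In all four boot-time call sites the new trailing argument is NULL: init_task_group is the root of the hierarchy and has no parent entity. Note also that the CGROUP_SCHED variants pass se (and rt_se) as NULL, so init_tg_cfs_entry and init_tg_rt_entry bail out at the if (!se) / if (!rt_se) guard right after wiring up the run queue; that is exactly how the root group's tasks end up sitting directly in rq->cfs, as the comment above describes.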
@@ -7798,10 +7810,11 @@ static void free_fair_sched_group(struct task_group *tg)
 	kfree(tg->se);
 }
 
-static int alloc_fair_sched_group(struct task_group *tg)
+static
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se;
+	struct sched_entity *se, *parent_se;
 	struct rq *rq;
 	int i;
 
@@ -7827,7 +7840,8 @@ static int alloc_fair_sched_group(struct task_group *tg)
 		if (!se)
 			goto err;
 
-		init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
+		parent_se = parent ? parent->se[i] : NULL;
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
 	}
 
 	return 1;
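Inside the per-CPU allocation loop, the child group's entity on CPU i attaches beneath the parent group's entity on that same CPU, with NULL meaning "attach at the root". A tiny runnable model of the lookup (hypothetical mini types; parent_entity_on is an illustrative name, the patch simply open-codes the ternary):

/* Sketch: per-CPU parent entity lookup, mirroring
 * "parent_se = parent ? parent->se[i] : NULL" from the patch. */
#include <stdio.h>

#define NR_CPUS 2

struct sched_entity { const char *owner; int cpu; };
struct task_group { struct sched_entity *se[NR_CPUS]; };

static struct sched_entity *parent_entity_on(struct task_group *parent, int cpu)
{
	return parent ? parent->se[cpu] : NULL;
}

int main(void)
{
	struct sched_entity pse[NR_CPUS] = { { "P", 0 }, { "P", 1 } };
	struct task_group parent = { { &pse[0], &pse[1] } };

	for (int i = 0; i < NR_CPUS; i++) {
		struct sched_entity *p = parent_entity_on(&parent, i);
		printf("cpu %d: parent entity %s\n", i, p ? p->owner : "(root)");
	}
	/* root-level groups simply pass NULL: */
	printf("root: %p\n", (void *)parent_entity_on(NULL, 0));
	return 0;
}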
@@ -7851,7 +7865,8 @@ static inline void free_fair_sched_group(struct task_group *tg)
 {
 }
 
-static inline int alloc_fair_sched_group(struct task_group *tg)
+static inline
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	return 1;
 }
@@ -7883,10 +7898,11 @@ static void free_rt_sched_group(struct task_group *tg)
 	kfree(tg->rt_se);
 }
 
-static int alloc_rt_sched_group(struct task_group *tg)
+static
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se;
+	struct sched_rt_entity *rt_se, *parent_se;
 	struct rq *rq;
 	int i;
 
@@ -7913,7 +7929,8 @@ static int alloc_rt_sched_group(struct task_group *tg)
 		if (!rt_se)
 			goto err;
 
-		init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
+		parent_se = parent ? parent->rt_se[i] : NULL;
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
 	}
 
 	return 1;
@@ -7937,7 +7954,8 @@ static inline void free_rt_sched_group(struct task_group *tg)
 {
 }
 
-static inline int alloc_rt_sched_group(struct task_group *tg)
+static inline
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	return 1;
 }
@@ -7960,7 +7978,7 @@ static void free_sched_group(struct task_group *tg)
 }
 
 /* allocate runqueue etc for a new task group */
-struct task_group *sched_create_group(void)
+struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
 	unsigned long flags;
@@ -7970,10 +7988,10 @@ struct task_group *sched_create_group(void)
 	if (!tg)
 		return ERR_PTR(-ENOMEM);
 
-	if (!alloc_fair_sched_group(tg))
+	if (!alloc_fair_sched_group(tg, parent))
 		goto err;
 
-	if (!alloc_rt_sched_group(tg))
+	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
 	spin_lock_irqsave(&task_group_lock, flags);
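sched_create_group() now takes the parent group and threads it into both allocators, keeping the existing two-step goto err unwind. A compact userspace model of the flow, with stand-in types (the model records the parent directly on the group for brevity, whereas the kernel records it per CPU inside the entities):

/* Sketch: both allocators must succeed before the group is usable;
 * the parent pointer is threaded through to each. */
#include <stdio.h>
#include <stdlib.h>

struct task_group { struct task_group *parent; int fair_ok, rt_ok; };

static int alloc_fair(struct task_group *tg, struct task_group *parent)
{
	tg->parent = parent;   /* stand-in; the kernel wires se[]/cfs_rq[] here */
	tg->fair_ok = 1;
	return 1;              /* 1 = success, matching the kernel convention */
}

static int alloc_rt(struct task_group *tg, struct task_group *parent)
{
	(void)parent;          /* the kernel threads this into rt_se[] setup */
	tg->rt_ok = 1;
	return 1;
}

static struct task_group *create_group(struct task_group *parent)
{
	struct task_group *tg = calloc(1, sizeof(*tg));

	if (!tg)
		return NULL;
	if (!alloc_fair(tg, parent))
		goto err;
	if (!alloc_rt(tg, parent))
		goto err;
	return tg;
err:
	free(tg);
	return NULL;
}

int main(void)
{
	struct task_group root = { 0 };
	struct task_group *child = create_group(&root);
	struct task_group *grandchild = child ? create_group(child) : NULL;

	printf("nesting works: %s\n",
	       grandchild && grandchild->parent->parent == &root ? "yes" : "no");
	return 0;
}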
@@ -8085,6 +8103,12 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	unsigned long flags;
 
 	/*
+	 * We can't change the weight of the root cgroup.
+	 */
+	if (!tg->se[0])
+		return -EINVAL;
+
+	/*
 	 * A weight of 0 or 1 can cause arithmetics problems.
 	 * (The default weight is 1024 - so there's no practical
 	 * limitation from this.)
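The new guard relies on an invariant established in sched_init() above: the root group is created with se = NULL on every CPU, so tg->se[0] == NULL uniquely identifies it and its weight change is refused. A small userspace model of the check (stand-in types; the clamp below is an assumption based on the "weight of 0 or 1" comment in the context, not quoted from the patch):

/* Sketch: root group has no scheduling entity, so se[0] identifies it. */
#include <stdio.h>
#include <errno.h>

struct sched_entity { int dummy; };
struct task_group { struct sched_entity *se[1]; unsigned long shares; };

static int set_shares(struct task_group *tg, unsigned long shares)
{
	if (!tg->se[0])
		return -EINVAL;   /* can't reweight the root group */
	if (shares < 2)
		shares = 2;       /* assumed clamp: 0 and 1 break the math */
	tg->shares = shares;
	return 0;
}

int main(void)
{
	struct sched_entity se;
	struct task_group root  = { { NULL }, 1024 };
	struct task_group child = { { &se },  1024 };

	printf("root:  %d\n", set_shares(&root, 2048));    /* -EINVAL */
	printf("child: %d\n", set_shares(&child, 2048));   /* 0 */
	return 0;
}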
@@ -8327,7 +8351,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 static struct cgroup_subsys_state *
 cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	struct task_group *tg;
+	struct task_group *tg, *parent;
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
@@ -8335,11 +8359,8 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 		return &init_task_group.css;
 	}
 
-	/* we support only 1-level deep hierarchical scheduler atm */
-	if (cgrp->parent->parent)
-		return ERR_PTR(-EINVAL);
-
-	tg = sched_create_group();
+	parent = cgroup_tg(cgrp->parent);
+	tg = sched_create_group(parent);
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
 
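Finally, cpu_cgroup_create() drops the "only 1-level deep" restriction: instead of rejecting grandchild cgroups with -EINVAL, it resolves the parent task group from cgrp->parent and hands it to sched_create_group(), so arbitrarily deep cpu-cgroup directories now map directly onto the scheduler's group hierarchy. The IS_ERR(tg) branch still collapses any failure to -ENOMEM, which appears harmless here since the only failure sched_create_group() is shown returning is ERR_PTR(-ENOMEM).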