author     Dhaval Giani <dhaval@linux.vnet.ibm.com>    2008-04-19 13:44:59 -0400
committer  Ingo Molnar <mingo@elte.hu>                 2008-04-19 13:44:59 -0400
commit     354d60c2ff72d86627dfe2089d186824abf4bb8e (patch)
tree       10cea61ce7036448ed7246820c5575df2a61bb3b /kernel/sched.c
parent     ea736ed5d353d7a3aa1cf8ce4cf8d947bc353fb2 (diff)
sched: mix tasks and groups
This patch allows tasks and groups to coexist in the same cfs_rq. With this
change, CFS group scheduling moves from a 1/(1+N) fairness model to a
1/(M+N) model, where M tasks and N groups exist at the same cfs_rq level.
[a.p.zijlstra@chello.nl: rt bits and assorted fixes]
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
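
To make the 1/(M+N) arithmetic concrete, here is a small stand-alone sketch (illustrative only, not part of the patch) that reproduces the bandwidth figure quoted in the comment the patch adds to sched_init(): with 10 tasks of weight 1024 and two groups A0/A1 of weight 1024 each on one cfs_rq, every entity gets about 8.33% of the CPU. The 1024 weight and the task/group counts come from that comment; the program itself is a hypothetical user-space calculation, not kernel code.

#include <stdio.h>

int main(void)
{
        /* weights taken from the example in the patch's comment */
        const unsigned long entity_weight = 1024;  /* NICE_0-style weight */
        const int ntasks = 10, ngroups = 2;        /* M tasks + N groups on one cfs_rq */

        unsigned long total = (ntasks + ngroups) * entity_weight;
        double share = (double)entity_weight / total;

        printf("entities at this cfs_rq level: %d\n", ntasks + ngroups);
        printf("each entity's (e.g. A0's) bandwidth: %.2f%%\n", share * 100.0);
        return 0;
}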
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  51
1 file changed, 49 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 62830eaec52f..1b7399dfa361 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -273,6 +273,7 @@ struct task_group {
         struct list_head list;
 };
 
+#ifdef CONFIG_USER_SCHED
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -284,6 +285,7 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
 #endif
+#endif
 
 /* task_group_lock serializes add/remove of task groups and also changes to
  * a task group's cpu shares.
@@ -7447,6 +7449,10 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
         list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 
         tg->se[cpu] = se;
+        /* se could be NULL for init_task_group */
+        if (!se)
+                return;
+
         se->cfs_rq = &rq->cfs;
         se->my_q = cfs_rq;
         se->load.weight = tg->shares;
@@ -7469,6 +7475,9 @@ static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
         list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
 
         tg->rt_se[cpu] = rt_se;
+        if (!rt_se)
+                return;
+
         rt_se->rt_rq = &rq->rt;
         rt_se->my_q = rt_rq;
         rt_se->parent = NULL;
@@ -7539,18 +7548,56 @@ void __init sched_init(void)
 #ifdef CONFIG_FAIR_GROUP_SCHED
                 init_task_group.shares = init_task_group_load;
                 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+#ifdef CONFIG_CGROUP_SCHED
+                /*
+                 * How much cpu bandwidth does init_task_group get?
+                 *
+                 * In case of task-groups formed thr' the cgroup filesystem, it
+                 * gets 100% of the cpu resources in the system. This overall
+                 * system cpu resource is divided among the tasks of
+                 * init_task_group and its child task-groups in a fair manner,
+                 * based on each entity's (task or task-group's) weight
+                 * (se->load.weight).
+                 *
+                 * In other words, if init_task_group has 10 tasks of weight
+                 * 1024) and two child groups A0 and A1 (of weight 1024 each),
+                 * then A0's share of the cpu resource is:
+                 *
+                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+                 *
+                 * We achieve this by letting init_task_group's tasks sit
+                 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+                 */
+                init_tg_cfs_entry(rq, &init_task_group, &rq->cfs, NULL, i, 1);
+#elif defined CONFIG_USER_SCHED
+                /*
+                 * In case of task-groups formed thr' the user id of tasks,
+                 * init_task_group represents tasks belonging to root user.
+                 * Hence it forms a sibling of all subsequent groups formed.
+                 * In this case, init_task_group gets only a fraction of overall
+                 * system cpu resource, based on the weight assigned to root
+                 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
+                 * by letting tasks of init_task_group sit in a separate cfs_rq
+                 * (init_cfs_rq) and having one entity represent this group of
+                 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
+                 */
                 init_tg_cfs_entry(rq, &init_task_group,
                                 &per_cpu(init_cfs_rq, i),
                                 &per_cpu(init_sched_entity, i), i, 1);
 
 #endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
                 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
+#ifdef CONFIG_CGROUP_SCHED
+                init_tg_rt_entry(rq, &init_task_group, &rq->rt, NULL, i, 1);
+#elif defined CONFIG_USER_SCHED
                 init_tg_rt_entry(rq, &init_task_group,
                                 &per_cpu(init_rt_rq, i),
                                 &per_cpu(init_sched_rt_entity, i), i, 1);
-#else
-                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
+#endif
 #endif
 
                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
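
The early returns added to init_tg_cfs_entry() and init_tg_rt_entry() are what let the cgroup case pass a NULL sched entity: a NULL entity means the group's tasks are queued directly on the parent runqueue, so there is nothing further to initialise. Below is a minimal stand-alone sketch of that convention; the types and names (mock_group, mock_init_tg_entry, etc.) are mock-ups for illustration, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct mock_entity { unsigned long weight; };
struct mock_group  { struct mock_entity *se; };

static void mock_init_tg_entry(struct mock_group *tg, struct mock_entity *se,
                               unsigned long shares)
{
        tg->se = se;
        /* se may be NULL for the root/init group: nothing more to set up */
        if (!se)
                return;

        se->weight = shares;
}

int main(void)
{
        struct mock_group root = { 0 }, child = { 0 };
        struct mock_entity child_se;

        mock_init_tg_entry(&root, NULL, 1024);      /* tasks sit on the parent rq directly */
        mock_init_tg_entry(&child, &child_se, 1024);

        printf("root has its own entity: %s\n", root.se ? "yes" : "no");
        printf("child entity weight: %lu\n", child.se->weight);
        return 0;
}

In the patch itself, init_task_group's CFS entry is set up exactly this way under CONFIG_CGROUP_SCHED: init_tg_cfs_entry(rq, &init_task_group, &rq->cfs, NULL, i, 1).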