author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-04-19 13:45:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:45:00 -0400
commit	eff766a65c60237bfa865160c3129de31fab591b (patch)
tree	c003be272be4bd614485606c77893295f3b175a2 /kernel/sched.c
parent	ec7dc8ac73e4a56ed03b673f026f08c0d547f597 (diff)
sched: fix the task_group hierarchy for UID grouping
UID grouping doesn't actually have a task_group representing the root of
the task_group tree. Add one.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
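
What the patch sets up: with CONFIG_USER_SCHED, root_task_group becomes the top of the task_group tree and every per-UID group, init_task_group (UID 0) included, is parented to it. Below is a minimal userspace sketch of that two-level shape only; the struct layout is simplified and the uid-1000 group is hypothetical, so this is an illustration, not the kernel code.

/*
 * Illustrative sketch, not part of the patch: a plain userspace model of
 * the hierarchy established under CONFIG_USER_SCHED. Only the parent/child
 * shape mirrors what the patch does in kernel/sched.c.
 */
#include <stdio.h>

struct task_group {
	const char *name;
	struct task_group *parent;	/* NULL only for the root of the tree */
};

/* The new root: every per-UID group hangs off this one. */
static struct task_group root_task_group = { "root_task_group", NULL };

/* UID-0 group; after the patch it is a child of root_task_group. */
static struct task_group init_task_group = {
	"init_task_group (UID 0)", &root_task_group
};

int main(void)
{
	/* A hypothetical group for another user, also a child of the root. */
	struct task_group uid1000 = { "uid-1000", &root_task_group };
	struct task_group *groups[] = {
		&root_task_group, &init_task_group, &uid1000
	};

	for (unsigned int i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		printf("%-25s parent: %s\n", groups[i]->name,
		       groups[i]->parent ? groups[i]->parent->name : "(none)");
	return 0;
}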
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	43
1 files changed, 41 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f9c8da798bbf..e03b45ccf789 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -274,6 +274,14 @@ struct task_group {
 };

 #ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ * Every UID task group (including init_task_group aka UID-0) will
+ * be a child to this group.
+ */
+struct task_group root_task_group;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
 #endif
+#else
+#define root_task_group init_task_group
 #endif

 /* task_group_lock serializes add/remove of task groups and also changes to
@@ -7508,6 +7518,9 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
+#ifdef CONFIG_USER_SCHED
+	alloc_size *= 2;
+#endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
 	 * we use alloc_bootmem().
@@ -7521,12 +7534,29 @@ void __init sched_init(void)

 		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.se = (struct sched_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);

 		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 	}

@@ -7540,6 +7570,10 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	init_rt_bandwidth(&init_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
+			global_rt_period(), RUNTIME_INF);
+#endif
 #endif

 #ifdef CONFIG_GROUP_SCHED
@@ -7582,6 +7616,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		root_task_group.shares = NICE_0_LOAD;
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
 		/*
 		 * In case of task-groups formed thr' the user id of tasks,
 		 * init_task_group represents tasks belonging to root user.
@@ -7595,7 +7631,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group,
 				&per_cpu(init_cfs_rq, i),
-				&per_cpu(init_sched_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_entity, i), i, 1,
+				root_task_group.se[i]);

 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -7606,9 +7643,11 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
 		init_tg_rt_entry(&init_task_group,
 				&per_cpu(init_rt_rq, i),
-				&per_cpu(init_sched_rt_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_rt_entity, i), i, 1,
+				root_task_group.rt_se[i]);
 #endif
 #endif
