 include/linux/sched.h |  3 +++
 kernel/sched.c        | 43 +++++++++++++++++++++++++++++++++++++++++++++--
 kernel/user.c         |  2 +-
 3 files changed, 45 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fa14781747cb..ada24022d230 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2051,6 +2051,9 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
diff --git a/kernel/sched.c b/kernel/sched.c
index f9c8da798bbf..e03b45ccf789 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -274,6 +274,14 @@ struct task_group {
 };
 
 #ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ *	Every UID task group (including init_task_group aka UID-0) will
+ *	be a child to this group.
+ */
+struct task_group root_task_group;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
 #endif
+#else
+#define root_task_group init_task_group
 #endif
 
 /* task_group_lock serializes add/remove of task groups and also changes to
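The comment in the hunk above is the heart of the change: under CONFIG_USER_SCHED every per-UID task group, including init_task_group (UID 0), becomes a child of root_task_group, while the #else branch aliases root_task_group to init_task_group so later code can name it unconditionally. A minimal user-space sketch of the intended shape (an illustrative model only, not the kernel's struct task_group):

#include <stdio.h>

/* Toy stand-in for the group hierarchy this patch sets up. */
struct group {
	const char   *name;
	struct group *parent;		/* NULL only for the root */
};

int main(void)
{
	struct group root    = { "root_task_group", NULL };
	struct group uid0    = { "init_task_group (UID 0)", &root };
	struct group uid1000 = { "UID 1000 group", &root };

	struct group *all[] = { &root, &uid0, &uid1000 };
	for (unsigned int i = 0; i < sizeof(all) / sizeof(all[0]); i++)
		printf("%-25s parent: %s\n", all[i]->name,
		       all[i]->parent ? all[i]->parent->name : "(none)");
	return 0;
}

With the alias in place, a !CONFIG_USER_SCHED build keeps exactly one top-level group and none of the code below needs an extra #ifdef at its call sites.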
@@ -7508,6 +7518,9 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
+#ifdef CONFIG_USER_SCHED
+	alloc_size *= 2;
+#endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
 	 * we use alloc_bootmem().
@@ -7521,12 +7534,29 @@ void __init sched_init(void)
 
 		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.se = (struct sched_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 	}
 
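The two sched_init() hunks above size and carve a single boot-time allocation: alloc_size covers two per-CPU pointer arrays per group-scheduling class (se/cfs_rq and rt_se/rt_rq), and the new "alloc_size *= 2" makes room for root_task_group's copies alongside init_task_group's. A rough user-space model of the arithmetic and carve order (malloc stands in for alloc_bootmem(), nr_cpu_ids is an assumed value):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long nr_cpu_ids = 4;		/* assumed CPU count for the model */
	unsigned long alloc_size = 0;
	char *ptr;

	alloc_size += 2 * nr_cpu_ids * sizeof(void **);	/* fair: se[] + cfs_rq[] */
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);	/* rt:   rt_se[] + rt_rq[] */
	alloc_size *= 2;				/* same set again for root_task_group */

	ptr = malloc(alloc_size);			/* models alloc_bootmem() */
	if (!ptr)
		return 1;

	/* Carve in the same order as the patch: init group's arrays, then root's. */
	void **init_se     = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **init_cfs_rq = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **root_se     = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **root_cfs_rq = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **init_rt_se  = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **init_rt_rq  = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **root_rt_se  = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);
	void **root_rt_rq  = (void **)ptr; ptr += nr_cpu_ids * sizeof(void **);

	printf("%lu bytes split into 8 arrays of %lu pointers each\n",
	       alloc_size, nr_cpu_ids);
	printf("last array ends at offset %ld\n",
	       (long)((char *)(root_rt_rq + nr_cpu_ids) - (char *)init_se));
	(void)init_cfs_rq; (void)root_se; (void)root_cfs_rq;
	(void)init_rt_se; (void)init_rt_rq; (void)root_rt_se;
	return 0;
}

The final offset equals alloc_size, which is why the single "*= 2" is enough: root_task_group consumes exactly the same set of per-CPU arrays as init_task_group.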
@@ -7540,6 +7570,10 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	init_rt_bandwidth(&init_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
+			global_rt_period(), RUNTIME_INF);
+#endif
 #endif
 
 #ifdef CONFIG_GROUP_SCHED
@@ -7582,6 +7616,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		root_task_group.shares = NICE_0_LOAD;
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
 		/*
 		 * In case of task-groups formed thr' the user id of tasks,
 		 * init_task_group represents tasks belonging to root user.
@@ -7595,7 +7631,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group,
 				&per_cpu(init_cfs_rq, i),
-				&per_cpu(init_sched_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_entity, i), i, 1,
+				root_task_group.se[i]);
 
 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
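In the CONFIG_USER_SCHED branch above, root_task_group is given NICE_0_LOAD shares and wired to each CPU's top-level cfs_rq, and init_task_group's per-CPU entity now names root_task_group.se[i] as its parent instead of NULL. The practical effect is that UID 0's group competes with the other per-UID groups under one common parent, in proportion to shares. A back-of-the-envelope model of that proportional split (NICE_0_LOAD assumed to be 1024; the share values are invented for illustration):

#include <stdio.h>

#define NICE_0_LOAD 1024	/* assumed value of the kernel constant */

int main(void)
{
	/* Hypothetical per-UID groups, all siblings under root_task_group. */
	const char *name[]     = { "UID 0 (init_task_group)", "UID 1000", "UID 1001" };
	unsigned long shares[] = { NICE_0_LOAD, NICE_0_LOAD, 2 * NICE_0_LOAD };
	unsigned long total = 0;

	for (int i = 0; i < 3; i++)
		total += shares[i];
	for (int i = 0; i < 3; i++)
		printf("%-24s shares=%4lu -> ~%.1f%% of the parent's CPU time\n",
		       name[i], shares[i], 100.0 * shares[i] / total);
	return 0;
}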
@@ -7606,9 +7643,11 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
 		init_tg_rt_entry(&init_task_group,
 				&per_cpu(init_rt_rq, i),
-				&per_cpu(init_sched_rt_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_rt_entity, i), i, 1,
+				root_task_group.rt_se[i]);
 #endif
 #endif
 
diff --git a/kernel/user.c b/kernel/user.c
index a28d9f992468..debce602bfdd 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
 {
 	int rc = 0;
 
-	up->tg = sched_create_group(NULL);
+	up->tg = sched_create_group(&root_task_group);
 	if (IS_ERR(up->tg))
 		rc = -ENOMEM;
 
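The kernel/user.c change is the consumer of the new export: each per-UID group is now created as a child of root_task_group rather than as a parentless top-level group, and the caller keeps the usual ERR_PTR/IS_ERR error convention. A small user-space sketch of that call pattern (toy stand-ins only; sched_create_group() and IS_ERR() are merely mimicked here):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct tg { struct tg *parent; };

static struct tg root_tg;			/* stands in for root_task_group */

/* Mimics the kernel convention: on failure, return an errno encoded
 * in the pointer value rather than NULL (roughly ERR_PTR(-ENOMEM)). */
static struct tg *create_group(struct tg *parent)
{
	struct tg *tg = malloc(sizeof(*tg));
	if (!tg)
		return (struct tg *)(long)-ENOMEM;
	tg->parent = parent;
	return tg;
}

static int is_err(const void *p)		/* roughly IS_ERR() */
{
	return (unsigned long)p >= (unsigned long)-4095L;
}

int main(void)
{
	/* Mirrors the patched sched_create_user(): parent is the root group. */
	struct tg *tg = create_group(&root_tg);
	int rc = 0;

	if (is_err(tg)) {
		rc = -ENOMEM;
	} else {
		printf("new per-UID group parented under root: %s\n",
		       tg->parent == &root_tg ? "yes" : "no");
		free(tg);
	}
	return rc ? 1 : 0;
}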