author		Yong Zhang <yong.zhang0@gmail.com>	2011-01-07 02:17:36 -0500
committer	Ingo Molnar <mingo@elte.hu>		2011-01-07 09:54:34 -0500
commit		07e06b011db2b3300f6c975ebf293fc4c8c59942 (patch)
tree		f6f0fdd7c1707aa9badf16d1ceb4de1e16adb9e9 /kernel/sched.c
parent		cb600d2f83c854ec3d6660063e4466431999489b (diff)
sched: Consolidate the name of root_task_group and init_task_group
root_task_group is a leftover from USER_SCHED; it is now always the
same as init_task_group.
But, as Mike suggested, root_task_group may be the more suitable name
to keep for a tree.
So in this patch:
init_task_group --> root_task_group
init_task_group_load --> root_task_group_load
INIT_TASK_GROUP_LOAD --> ROOT_TASK_GROUP_LOAD
Suggested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110107071736.GA32635@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	42
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 04949089e760..54b58ec99fd4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
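The MIN_SHARES/MAX_SHARES bounds retained above exist because, as the in-code comment notes, a weight of 0 or 1 causes arithmetic problems; shares written by userspace are clamped into this window before being applied (sched.c does the equivalent clamping in sched_group_set_shares()). A standalone illustration, in plain userspace C rather than kernel code:

	#include <stdio.h>

	#define MIN_SHARES	2
	#define MAX_SHARES	(1UL << 18)

	/* clamp a user-supplied shares value into the valid range */
	static unsigned long clamp_shares(unsigned long shares)
	{
		if (shares < MIN_SHARES)
			shares = MIN_SHARES;
		if (shares > MAX_SHARES)
			shares = MAX_SHARES;
		return shares;
	}

	int main(void)
	{
		printf("%lu\n", clamp_shares(0));	  /* 2: avoids the 0/1 arithmetic problems */
		printf("%lu\n", clamp_shares(1UL << 20)); /* 262144: capped at MAX_SHARES */
		return 0;
	}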
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.se = (struct sched_entity **)ptr;
+		root_task_group.se = (struct sched_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
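The hunk above is a single-allocation pattern: sched_init() sizes one zeroed buffer for all of the root group's per-cpu pointer arrays and carves it up by bumping ptr. A standalone sketch of the same carving, in userspace C with calloc standing in for kzalloc and nr_cpu_ids assumed to be 4:

	#include <stdlib.h>

	struct sched_entity;
	struct cfs_rq;

	int main(void)
	{
		unsigned int nr_cpu_ids = 4;	/* assumed value for the example */
		size_t alloc_size = 2 * nr_cpu_ids * sizeof(void **);
		unsigned long ptr = (unsigned long)calloc(1, alloc_size);

		/* carve the first per-cpu array out of the block */
		struct sched_entity **se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		/* the next array starts where the previous one ended */
		struct cfs_rq **cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		/* se[cpu] and cfs_rq[cpu] are now distinct, zero-filled
		 * per-cpu slots backed by a single allocation */
		se[0] = NULL;
		cfs_rq[0] = NULL;
		free(se);
		return 0;
	}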
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
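Putting root_task_group on the global task_groups list matters because whole-hierarchy walks start from that list, and the root is just another entry on it. A minimal standalone sketch in userspace C, with a plain singly linked list standing in for the kernel's struct list_head:

	#include <stdio.h>

	struct task_group {
		const char *name;
		struct task_group *next;	/* stand-in for struct list_head list */
	};

	static struct task_group root_task_group = { "root", NULL };
	static struct task_group *task_groups = &root_task_group;

	int main(void)
	{
		struct task_group a0 = { "A0", NULL };

		/* the moral equivalent of list_add(&a0.list, &task_groups) */
		a0.next = task_groups;
		task_groups = &a0;

		/* a whole-hierarchy walk sees every group, root included */
		for (struct task_group *tg = task_groups; tg; tg = tg->next)
			printf("%s\n", tg->name);
		return 0;
	}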
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
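The figure in the comment above is easy to verify: with 10 root tasks of weight 1024 plus child groups A0 and A1 at weight 1024 each, A0 gets 1024 / (12 * 1024) of the cpu. A standalone check in userspace C:

	#include <stdio.h>

	int main(void)
	{
		double w = 1024.0;		/* NICE_0_LOAD-style weight */
		double total = 10 * w + w + w;	/* 10 root tasks + A0 + A1 */

		/* prints "A0's bandwidth = 8.33%", matching the comment */
		printf("A0's bandwidth = %.2f%%\n", 100.0 * w / total);
		return 0;
	}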
@@ -8812,7 +8810,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);