aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorLi Zefan <lizf@cn.fujitsu.com>2008-10-29 05:03:22 -0400
committerIngo Molnar <mingo@elte.hu>2008-10-29 06:53:26 -0400
commiteab172294d5e24464f332dd8e94a57a9819c81c4 (patch)
treef8f577d52aa83b76cd7f044a8327f236bb35f835 /kernel/sched.c
parente946217e4fdaa67681bbabfa8e6b18641921f750 (diff)
sched: cleanup for alloc_rt/fair_sched_group()
Impact: cleanup. Remove the checking of parent == NULL. It won't be NULL, because we dynamically create sub task_groups only, and a sub task_group always has its parent. (The root task_group is statically defined.) Also replace kmalloc_node(GFP_KERNEL|__GFP_ZERO) with kzalloc_node(). Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c26
1 file changed, 12 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..7dd6c860773b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8472,7 +8472,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8488,18 +8488,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8560,7 +8559,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8577,18 +8576,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;