| author | Jan H. Schönherr <schnhrr@cs.tu-berlin.de> | 2011-07-14 12:32:43 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-07-21 12:01:54 -0400 |
| commit | acb5a9ba3bd7cd8b3264f67a3789a9587d3b935b (patch) | |
| tree | aa384f35760185200183a0a6f53ec8e4ce1d553f /kernel | |
| parent | 26a148eb9c790149750f7e77da0d96029443d400 (diff) | |
sched: Separate group-scheduling code more clearly
Clean up cfs/rt runqueue initialization by moving the group-scheduling
related code into the corresponding functions.

Also, keep group scheduling as an add-on, so that things are only done
additionally, i.e. remove the init_*_rq() calls from init_tg_*_entry().
(This removes a redundant initialization during sched_init().)

In the group-scheduling case, rt_rq->highest_prio.curr is now initialized
twice, but adding another #ifdef seems not worth it.
Signed-off-by: Jan H. Schönherr <schnhrr@cs.tu-berlin.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1310661163-16606-1-git-send-email-schnhrr@cs.tu-berlin.de
Signed-off-by: Ingo Molnar <mingo@elte.hu>
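
For reference, here is the plain cfs runqueue initializer as it looks after this patch, reconstructed from the new (right-hand) side of the diff below. It is just the function from the patch, shown without surrounding kernel context: all group-scheduling fields (cfs_rq->rq, cfs_rq->load_stamp) have moved out into init_tg_cfs_entry(), leaving no group-related #ifdef blocks behind.

```c
/* Post-patch: no struct rq parameter and no CONFIG_FAIR_GROUP_SCHED
 * block; the function now initializes only the cfs_rq itself. */
static void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
```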
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 43
1 file changed, 19 insertions(+), 24 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index e3f0bac05270..6fdf7ffbebc6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7859,17 +7859,10 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 	INIT_LIST_HEAD(&cfs_rq->tasks);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	cfs_rq->rq = rq;
-	/* allow initial update_cfs_load() to truncate */
-#ifdef CONFIG_SMP
-	cfs_rq->load_stamp = 1;
-#endif
-#endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -7889,13 +7882,9 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#ifdef CONFIG_SMP
 	rt_rq->highest_prio.next = MAX_RT_PRIO;
-#endif
-#endif
-#ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
@@ -7905,11 +7894,6 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	rt_rq->rt_nr_boosted = 0;
-	rt_rq->rq = rq;
-#endif
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7918,11 +7902,17 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *parent)
 {
 	struct rq *rq = cpu_rq(cpu);
-	tg->cfs_rq[cpu] = cfs_rq;
-	init_cfs_rq(cfs_rq, rq);
+
 	cfs_rq->tg = tg;
+	cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+	/* allow initial update_cfs_load() to truncate */
+	cfs_rq->load_stamp = 1;
+#endif
 
+	tg->cfs_rq[cpu] = cfs_rq;
 	tg->se[cpu] = se;
+
 	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
@@ -7945,12 +7935,14 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	tg->rt_rq[cpu] = rt_rq;
-	init_rt_rq(rt_rq, rq);
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->rt_nr_boosted = 0;
+	rt_rq->rq = rq;
 	rt_rq->tg = tg;
-	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	tg->rt_rq[cpu] = rt_rq;
 	tg->rt_se[cpu] = rt_se;
+
 	if (!rt_se)
 		return;
 
@@ -8032,7 +8024,7 @@ void __init sched_init(void)
 		rq->nr_running = 0;
 		rq->calc_load_active = 0;
 		rq->calc_load_update = jiffies + LOAD_FREQ;
-		init_cfs_rq(&rq->cfs, rq);
+		init_cfs_rq(&rq->cfs);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		root_task_group.shares = root_task_group_load;
@@ -8335,6 +8327,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!se)
 			goto err_free_rq;
 
+		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 	}
 
@@ -8425,6 +8418,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!rt_se)
 			goto err_free_rq;
 
+		init_rt_rq(rt_rq, cpu_rq(i));
+		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}
 
```
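
The "add-on" convention this patch establishes can be read off the last two hunks: callers must run the plain initializer first and then layer the per-group fields on top via init_tg_*_entry(). A sketch of the per-CPU loop body after the patch, pieced together from the hunks above (allocation and error handling omitted):

```c
/* CONFIG_FAIR_GROUP_SCHED path: plain init first, group fields second. */
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);

/* CONFIG_RT_GROUP_SCHED path: likewise, with the group's bandwidth
 * applied between the two steps, since init_rt_rq() resets
 * rt_rq->rt_runtime to 0. */
init_rt_rq(rt_rq, cpu_rq(i));
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
```

Ordering matters here: because init_tg_*_entry() no longer calls init_*_rq() itself, swapping the two steps would let the plain initializer clobber the group fields (e.g. rt_runtime).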