author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-04-19 13:45:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:45:00 -0400
commit		18d95a2832c1392a2d63227a7a6d433cb9f2037e
tree		fa85b700aa3caac5b1309edd8e31d9b957957a83
parent		1d3504fcf5606579d60b649d19f44b3871c1ddae
sched: fair-group: SMP-nice for group scheduling
Implement SMP nice support for the full group hierarchy.

On each load-balance action, compile a sched_domain wide view of the full task_group tree. We compute the domain wide view when walking down the hierarchy, and readjust the weights when walking back up.

After collecting and readjusting the domain wide view, we try to balance the tasks within the task_groups. The current approach is to naively balance each task group until we've moved the targeted amount of load.

Inspired by Srivatsa Vaddagiri's previous code and Abhishek Chandra's H-SMP paper.

XXX: there will be some numerical issues due to the limited nature of SCHED_LOAD_SCALE wrt representing a task_group's influence on the total weight. When the tree is deep enough, or the task weight small enough, we'll run out of bits.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Abhishek Chandra <chandra@cs.umn.edu>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
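For orientation, here is a minimal, self-contained sketch of the down/up walk the changelog describes. It is not the kernel implementation: the type, field, and function names are invented for illustration, and the arithmetic is a heavy simplification; the real code in kernel/sched.c operates on per-CPU runqueue weights across the sched_domain's CPUs.

/*
 * Illustrative sketch only: struct group and walk_group_tree() are made up
 * here to show the shape of the computation -- aggregate a domain-wide view
 * while descending the task_group tree, readjust weights while returning.
 */
struct group {
	unsigned long shares;		/* configured weight of the group */
	unsigned long local_load;	/* load of tasks queued directly in it */
	unsigned long agg_load;		/* domain-wide view, filled walking down */
	unsigned long eff_weight;	/* readjusted while walking back up */
	struct group *children;
	struct group *sibling;
};

static void walk_group_tree(struct group *g, const struct group *parent)
{
	struct group *c;

	/*
	 * Walking down: derive this group's domain-wide load from the
	 * parent's view and this group's share of the parent's weight.
	 */
	if (!parent)
		g->agg_load = g->local_load;
	else if (parent->shares)
		g->agg_load = parent->agg_load * g->shares / parent->shares;
	else
		g->agg_load = 0;

	for (c = g->children; c; c = c->sibling)
		walk_group_tree(c, g);

	/*
	 * Walking back up: readjust the weight this group presents to the
	 * balancer in proportion to the load its own tasks contribute.
	 * SCHED_LOAD_SCALE offers limited resolution, so deep trees or very
	 * light tasks drive this quotient toward zero -- the changelog's
	 * XXX note.
	 */
	if (g->agg_load)
		g->eff_weight = g->shares * g->local_load / g->agg_load;
	else
		g->eff_weight = 0;
}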
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	4
1 files changed, 4 insertions, 0 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 201a69382a42..736fb8fd8977 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -518,6 +518,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	 */
 	for_each_sched_rt_entity(rt_se)
 		enqueue_rt_entity(rt_se);
+
+	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -537,6 +539,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 		if (rt_rq && rt_rq->rt_nr_running)
 			enqueue_rt_entity(rt_se);
 	}
+
+	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
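The four added lines keep the runqueue's total load in step with RT tasks as they are queued and dequeued, so the group-aware balancing described in the changelog sees their weight too. As a rough, self-contained approximation of what the two helpers called here boil down to (the real inc_cpu_load()/dec_cpu_load() live in kernel/sched.c; the stand-in types below exist only to make the example complete):

/*
 * Minimal stand-ins so the sketch compiles on its own; the real struct rq
 * and update_load_*() helpers are defined in kernel/sched.c.
 */
struct load_weight {
	unsigned long weight;
};

struct rq {
	struct load_weight load;	/* total weight of runnable tasks */
};

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
}

/* Enqueue path: fold the RT task's weight into the runqueue load... */
static void inc_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_add(&rq->load, load);
}

/* ...and the dequeue path takes it back out. */
static void dec_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_sub(&rq->load, load);
}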