author    Con Kolivas <kernel@kolivas.org>    2005-11-09 00:39:00 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-11-09 10:56:32 -0500
commit    ede3d0fba99520f268067917b50858d788bc41da (patch)
tree      5c33dc43c22f0c2b17db2f118156384baa35cae8
parent    6dd4a85bb3ee0715415892c8b0f2a9bd08d31ca4 (diff)
[PATCH] sched: consider migration thread with smp nice
The intermittent scheduling of the migration thread at ultra high priority
makes the smp nice handling see that runqueue as being heavily loaded.  The
migration thread itself actually handles the balancing so its influence on
priority balancing should be ignored.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  kernel/sched.c  35
1 files changed, 26 insertions, 9 deletions
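
For context on why the runqueue looks "heavily loaded": as the diff below shows, the smp nice code weights each runnable task by MAX_PRIO - prio when maintaining a runqueue's prio_bias, so a transient ultra-high-priority task inflates the bias far beyond what ordinary tasks contribute. The standalone sketch that follows is not the kernel's code; toy_rq, toy_inc_nr_running, TOY_MAX_PRIO and the priority values are invented for illustration (using the 2.6 convention that lower prio means higher priority, MAX_PRIO is 140 and a nice-0 task sits at prio 120). It only shows the size of the skew and the effect of skipping the migration thread, mirroring the check added in the patch.

/*
 * Standalone sketch (not kernel code): illustrates how counting the
 * migration thread in the prio_bias accounting skews the load estimate.
 */
#include <stdio.h>
#include <stdbool.h>

#define TOY_MAX_PRIO 140	/* illustrative, matching the 2.6 convention */

struct toy_rq {
	int nr_running;
	int prio_bias;
};

static void toy_inc_nr_running(struct toy_rq *rq, int prio, bool migration_thread)
{
	rq->nr_running++;
	/* The fix: the migration thread only performs the balancing, so its
	 * ultra high priority must not bias the load estimate. */
	if (!migration_thread)
		rq->prio_bias += TOY_MAX_PRIO - prio;
}

int main(void)
{
	struct toy_rq fixed = { 0, 0 }, unfixed = { 0, 0 };

	/* Two nice-0 tasks (prio 120) plus the migration thread running
	 * briefly at an RT priority near 0 (value illustrative). */
	toy_inc_nr_running(&fixed, 120, false);
	toy_inc_nr_running(&fixed, 120, false);
	toy_inc_nr_running(&fixed, 0, true);

	toy_inc_nr_running(&unfixed, 120, false);
	toy_inc_nr_running(&unfixed, 120, false);
	toy_inc_nr_running(&unfixed, 0, false);	/* counted like any RT task */

	printf("prio_bias ignoring the migration thread: %d\n", fixed.prio_bias);   /* 40 */
	printf("prio_bias counting the migration thread: %d\n", unfixed.prio_bias); /* 180 */
	return 0;
}
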
diff --git a/kernel/sched.c b/kernel/sched.c
index 502d47c883b6..0f2def822296 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -670,6 +670,31 @@ static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 	rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
+		inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			dec_prio_bias(rq, p->prio);
+	} else
+		dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -678,25 +703,17 @@ static inline void inc_prio_bias(runqueue_t *rq, int prio)
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	if (rt_task(p))
-		inc_prio_bias(rq, p->prio);
-	else
-		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	if (rt_task(p))
-		dec_prio_bias(rq, p->prio);
-	else
-		dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.