diff options
author | Steven Rostedt <srostedt@redhat.com> | 2008-01-25 15:08:07 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 15:08:07 -0500 |
commit | 4642dafdf93dc7d66ee33437b93a5e6b8cea20d2 (patch) | |
tree | 4e3fd9c95be1ed14e9f40b2dcf232c40e5ab8fef | |
parent | f65eda4f789168ba5ff3fa75546c29efeed19f58 (diff) |
sched: push RT tasks from overloaded CPUs
This patch adds pushing of overloaded RT tasks from a runqueue that is
having tasks (most likely RT tasks) added to it.
TODO: We don't cover the case of waking of new RT tasks (yet).
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/sched.c | 1 | ||||
-rw-r--r-- | kernel/sched_rt.c | 10 |
2 files changed, 11 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index c91797107913..357d3a084de8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1710,6 +1710,7 @@ out_activate: | |||
1710 | 1710 | ||
1711 | out_running: | 1711 | out_running: |
1712 | p->state = TASK_RUNNING; | 1712 | p->state = TASK_RUNNING; |
1713 | wakeup_balance_rt(rq, p); | ||
1713 | out: | 1714 | out: |
1714 | task_rq_unlock(rq, &flags); | 1715 | task_rq_unlock(rq, &flags); |
1715 | 1716 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index bacb32039e95..d38a8a559aa5 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -558,6 +558,15 @@ static void schedule_tail_balance_rt(struct rq *rq) | |||
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
561 | |||
562 | static void wakeup_balance_rt(struct rq *rq, struct task_struct *p) | ||
563 | { | ||
564 | if (unlikely(rt_task(p)) && | ||
565 | !task_running(rq, p) && | ||
566 | (p->prio >= rq->curr->prio)) | ||
567 | push_rt_tasks(rq); | ||
568 | } | ||
569 | |||
561 | /* | 570 | /* |
562 | * Load-balancing iterator. Note: while the runqueue stays locked | 571 | * Load-balancing iterator. Note: while the runqueue stays locked |
563 | * during the whole iteration, the current task might be | 572 | * during the whole iteration, the current task might be |
@@ -665,6 +674,7 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
665 | #else /* CONFIG_SMP */ | 674 | #else /* CONFIG_SMP */ |
666 | # define schedule_tail_balance_rt(rq) do { } while (0) | 675 | # define schedule_tail_balance_rt(rq) do { } while (0) |
667 | # define schedule_balance_rt(rq, prev) do { } while (0) | 676 | # define schedule_balance_rt(rq, prev) do { } while (0) |
677 | # define wakeup_balance_rt(rq, p) do { } while (0) | ||
668 | #endif /* CONFIG_SMP */ | 678 | #endif /* CONFIG_SMP */ |
669 | 679 | ||
670 | static void task_tick_rt(struct rq *rq, struct task_struct *p) | 680 | static void task_tick_rt(struct rq *rq, struct task_struct *p) |