aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_rt.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-01-25 15:08:22 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-25 15:08:22 -0500
commit9a897c5a6701bcb6f099f7ca20194999102729fd (patch)
tree9c5415d5e2dd115660200cbd246fe1343cd5cd5c /kernel/sched_rt.c
parent4bf0b77158d581c952af237aec79d0604b78fe27 (diff)
sched: RT-balance, replace hooks with pre/post schedule and wakeup methods
To make the main sched.c code more agnostic to the scheduling classes, the specific hooks in the schedule code for RT-class balancing are replaced with pre_schedule, post_schedule and task_wake_up methods. These methods may be used by any of the classes, but currently only the sched_rt class implements them. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--kernel/sched_rt.c17
1 file changed, 7 insertions, 10 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3ea0cae513d2..a5a45104603a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -689,14 +689,14 @@ static int pull_rt_task(struct rq *this_rq)
689 return ret; 689 return ret;
690} 690}
691 691
692static void schedule_balance_rt(struct rq *rq, struct task_struct *prev) 692static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
693{ 693{
694 /* Try to pull RT tasks here if we lower this rq's prio */ 694 /* Try to pull RT tasks here if we lower this rq's prio */
695 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 695 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
696 pull_rt_task(rq); 696 pull_rt_task(rq);
697} 697}
698 698
699static void schedule_tail_balance_rt(struct rq *rq) 699static void post_schedule_rt(struct rq *rq)
700{ 700{
701 /* 701 /*
702 * If we have more than one rt_task queued, then 702 * If we have more than one rt_task queued, then
@@ -713,10 +713,9 @@ static void schedule_tail_balance_rt(struct rq *rq)
713} 713}
714 714
715 715
716static void wakeup_balance_rt(struct rq *rq, struct task_struct *p) 716static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
717{ 717{
718 if (unlikely(rt_task(p)) && 718 if (!task_running(rq, p) &&
719 !task_running(rq, p) &&
720 (p->prio >= rq->rt.highest_prio) && 719 (p->prio >= rq->rt.highest_prio) &&
721 rq->rt.overloaded) 720 rq->rt.overloaded)
722 push_rt_tasks(rq); 721 push_rt_tasks(rq);
@@ -780,11 +779,6 @@ static void leave_domain_rt(struct rq *rq)
780 if (rq->rt.overloaded) 779 if (rq->rt.overloaded)
781 rt_clear_overload(rq); 780 rt_clear_overload(rq);
782} 781}
783
784#else /* CONFIG_SMP */
785# define schedule_tail_balance_rt(rq) do { } while (0)
786# define schedule_balance_rt(rq, prev) do { } while (0)
787# define wakeup_balance_rt(rq, p) do { } while (0)
788#endif /* CONFIG_SMP */ 782#endif /* CONFIG_SMP */
789 783
790static void task_tick_rt(struct rq *rq, struct task_struct *p) 784static void task_tick_rt(struct rq *rq, struct task_struct *p)
@@ -840,6 +834,9 @@ const struct sched_class rt_sched_class = {
840 .set_cpus_allowed = set_cpus_allowed_rt, 834 .set_cpus_allowed = set_cpus_allowed_rt,
841 .join_domain = join_domain_rt, 835 .join_domain = join_domain_rt,
842 .leave_domain = leave_domain_rt, 836 .leave_domain = leave_domain_rt,
837 .pre_schedule = pre_schedule_rt,
838 .post_schedule = post_schedule_rt,
839 .task_wake_up = task_wake_up_rt,
843#endif 840#endif
844 841
845 .set_curr_task = set_curr_task_rt, 842 .set_curr_task = set_curr_task_rt,