diff options
author | Steven Rostedt <rostedt@goodmis.org> | 2008-01-25 15:08:22 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 15:08:22 -0500 |
commit | cb46984504048db946cd551c261df4e70d59a8ea (patch) | |
tree | e07343cc5967f74370c6b0290b67a225d868a99d /kernel/sched_rt.c | |
parent | 9a897c5a6701bcb6f099f7ca20194999102729fd (diff) |
sched: RT-balance, add new methods to sched_class
Dmitry Adamushko found that the current implementation of the RT
balancing code left out changes to the sched_setscheduler and
rt_mutex_setprio.
This patch addresses this issue by adding methods to the scheduler classes
to handle being switched out of (switched_from) and being switched into
(switched_to) a sched_class. A method for handling priority changes
(prio_changed) is also added.
This patch also removes some duplicate logic between rt_mutex_setprio and
sched_setscheduler.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r-- | kernel/sched_rt.c | 89 |
1 file changed, 89 insertions, 0 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index a5a45104603a..57fa3d96847b 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -779,7 +779,92 @@ static void leave_domain_rt(struct rq *rq) | |||
779 | if (rq->rt.overloaded) | 779 | if (rq->rt.overloaded) |
780 | rt_clear_overload(rq); | 780 | rt_clear_overload(rq); |
781 | } | 781 | } |
782 | |||
783 | /* | ||
784 | * When switching from the rt queue, we bring ourselves to a position | ||
785 | * that we might want to pull RT tasks from other runqueues. | ||
786 | */ | ||
787 | static void switched_from_rt(struct rq *rq, struct task_struct *p, | ||
788 | int running) | ||
789 | { | ||
790 | /* | ||
791 | * If there are other RT tasks then we will reschedule | ||
792 | * and the scheduling of the other RT tasks will handle | ||
793 | * the balancing. But if we are the last RT task | ||
794 | * we may need to handle the pulling of RT tasks | ||
795 | * now. | ||
796 | */ | ||
797 | if (!rq->rt.rt_nr_running) | ||
798 | pull_rt_task(rq); | ||
799 | } | ||
800 | #endif /* CONFIG_SMP */ | ||
801 | |||
802 | /* | ||
803 | * When switching a task to RT, we may overload the runqueue | ||
804 | * with RT tasks. In this case we try to push them off to | ||
805 | * other runqueues. | ||
806 | */ | ||
807 | static void switched_to_rt(struct rq *rq, struct task_struct *p, | ||
808 | int running) | ||
809 | { | ||
810 | int check_resched = 1; | ||
811 | |||
812 | /* | ||
813 | * If we are already running, then there's nothing | ||
814 | * that needs to be done. But if we are not running | ||
815 | * we may need to preempt the current running task. | ||
816 | * If that current running task is also an RT task | ||
817 | * then see if we can move to another run queue. | ||
818 | */ | ||
819 | if (!running) { | ||
820 | #ifdef CONFIG_SMP | ||
821 | if (rq->rt.overloaded && push_rt_task(rq) && | ||
822 | /* Don't resched if we changed runqueues */ | ||
823 | rq != task_rq(p)) | ||
824 | check_resched = 0; | ||
825 | #endif /* CONFIG_SMP */ | ||
826 | if (check_resched && p->prio < rq->curr->prio) | ||
827 | resched_task(rq->curr); | ||
828 | } | ||
829 | } | ||
830 | |||
831 | /* | ||
832 | * Priority of the task has changed. This may cause | ||
833 | * us to initiate a push or pull. | ||
834 | */ | ||
835 | static void prio_changed_rt(struct rq *rq, struct task_struct *p, | ||
836 | int oldprio, int running) | ||
837 | { | ||
838 | if (running) { | ||
839 | #ifdef CONFIG_SMP | ||
840 | /* | ||
841 | * If our priority decreases while running, we | ||
842 | * may need to pull tasks to this runqueue. | ||
843 | */ | ||
844 | if (oldprio < p->prio) | ||
845 | pull_rt_task(rq); | ||
846 | /* | ||
847 | * If there's a higher priority task waiting to run | ||
848 | * then reschedule. | ||
849 | */ | ||
850 | if (p->prio > rq->rt.highest_prio) | ||
851 | resched_task(p); | ||
852 | #else | ||
853 | /* For UP simply resched on drop of prio */ | ||
854 | if (oldprio < p->prio) | ||
855 | resched_task(p); | ||
782 | #endif /* CONFIG_SMP */ | 856 | #endif /* CONFIG_SMP */ |
857 | } else { | ||
858 | /* | ||
859 | * This task is not running, but if it is | ||
860 | * greater than the current running task | ||
861 | * then reschedule. | ||
862 | */ | ||
863 | if (p->prio < rq->curr->prio) | ||
864 | resched_task(rq->curr); | ||
865 | } | ||
866 | } | ||
867 | |||
783 | 868 | ||
784 | static void task_tick_rt(struct rq *rq, struct task_struct *p) | 869 | static void task_tick_rt(struct rq *rq, struct task_struct *p) |
785 | { | 870 | { |
@@ -837,8 +922,12 @@ const struct sched_class rt_sched_class = { | |||
837 | .pre_schedule = pre_schedule_rt, | 922 | .pre_schedule = pre_schedule_rt, |
838 | .post_schedule = post_schedule_rt, | 923 | .post_schedule = post_schedule_rt, |
839 | .task_wake_up = task_wake_up_rt, | 924 | .task_wake_up = task_wake_up_rt, |
925 | .switched_from = switched_from_rt, | ||
840 | #endif | 926 | #endif |
841 | 927 | ||
842 | .set_curr_task = set_curr_task_rt, | 928 | .set_curr_task = set_curr_task_rt, |
843 | .task_tick = task_tick_rt, | 929 | .task_tick = task_tick_rt, |
930 | |||
931 | .prio_changed = prio_changed_rt, | ||
932 | .switched_to = switched_to_rt, | ||
844 | }; | 933 | }; |