author    | Thomas Gleixner <tglx@linutronix.de> | 2010-01-20 15:59:01 -0500
committer | Thomas Gleixner <tglx@linutronix.de> | 2010-01-22 12:09:59 -0500
commit    | 37dad3fce97f01e5149d69de0833d8452c0e862e (patch)
tree      | b61b32fd56301211e06c4ff8d7d72a08eb7af182 /kernel
parent    | ea87bb7853168434f4a82426dd1ea8421f9e604d (diff)
sched: Implement head queueing for sched_rt
The ability to enqueue a task at the head of a SCHED_FIFO priority
list is required to fix some violations of the POSIX scheduling
policy, e.g. POSIX mandates that a runnable task whose priority is
lowered be queued at the head of the list for its new priority.
Implement the functionality in sched_rt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Carsten Emde <cbe@osadl.org>
Tested-by: Mathias Weber <mathias.weber.mw1@roche.com>
LKML-Reference: <20100120171629.772169931@linutronix.de>
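
For readers without the kernel tree at hand, here is a minimal, self-contained
C sketch of the mechanism this patch adds. It is illustrative only: struct
task, enqueue(), and the list helpers below are simplified stand-ins for the
kernel's <linux/list.h> primitives and struct sched_rt_entity, not kernel
code. The head/tail distinction, however, is exactly the one
__enqueue_rt_entity() gains here: list_add() queues at the head of a priority
list, list_add_tail() appends.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's doubly linked list primitives. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

static void __list_insert(struct list_head *new, struct list_head *prev,
			  struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* list_add(): insert right after the head, i.e. queue at the front. */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_insert(new, head, head->next);
}

/* list_add_tail(): insert right before the head, i.e. append at the back. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_insert(new, head->prev, head);
}

/* Hypothetical task; run_list plays the role of rt_se->run_list. */
struct task { const char *name; struct list_head run_list; };

/* Mirrors the patched __enqueue_rt_entity(): head selects front vs. back. */
static void enqueue(struct task *t, struct list_head *queue, bool head)
{
	if (head)
		list_add(&t->run_list, queue);
	else
		list_add_tail(&t->run_list, queue);
}

int main(void)
{
	struct list_head queue;	/* run list of one SCHED_FIFO priority */
	struct task a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };

	list_init(&queue);
	enqueue(&a, &queue, false);	/* tail: A     */
	enqueue(&b, &queue, false);	/* tail: A B   */
	enqueue(&c, &queue, true);	/* head: C A B */

	for (struct list_head *p = queue.next; p != &queue; p = p->next) {
		const struct task *t = (const struct task *)
			((const char *)p - offsetof(struct task, run_list));
		printf("%s ", t->name);
	}
	printf("\n");	/* prints: C A B */
	return 0;
}

Compiled with any C99 compiler, the program prints "C A B": the head-queued
task C overtakes A and B at the same priority without disturbing their
relative order.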
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_rt.c | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 38076dabb44a..ca49ceb01201 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -803,7 +803,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +819,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +859,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,7 +874,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }
 
@@ -886,7 +889,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
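
Note that enqueue_task_rt() itself only forwards the head flag: as the last
hunk's context shows, the flag arrives from the core enqueue path, which the
parent commit (ea87bb78, "sched: Extend enqueue_task to allow head queueing")
extended with the same bool head parameter. Nothing in this patch passes
head=true yet; internal callers such as sched_rt_rq_enqueue() and the group
re-enqueue in dequeue_rt_entity() explicitly pass false to preserve the old
tail-queueing behaviour. The expected user is the priority-change path, where
queueing a deboosted task at the head of its new list is what POSIX
prescribes.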