author     Thomas Gleixner <tglx@linutronix.de>    2010-01-20 15:58:57 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2010-01-22 12:09:59 -0500
commit     ea87bb7853168434f4a82426dd1ea8421f9e604d (patch)
tree       fcadec9b0fd1c57e4d1b625e2492c13c852972f1 /kernel
parent     7c9414385ebfdd87cc542d4e7e3bb0dbb2d3ce25 (diff)
sched: Extend enqueue_task to allow head queueing
The ability to enqueue a task at the head of a SCHED_FIFO priority list is
required to fix some violations of the POSIX scheduling policy. Extend the
related functions with a "head" argument.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Carsten Emde <cbe@osadl.org>
Tested-by: Mathias Weber <mathias.weber.mw1@roche.com>
LKML-Reference: <20100120171629.734886007@linutronix.de>
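To make the effect of the new flag concrete: a normal enqueue appends the task
to the tail of its priority list (plain FIFO order), while head queueing
inserts it at the front so it is picked next among tasks of that priority.
Below is a minimal user-space sketch of that difference; fake_task, runlist
and enqueue are made-up names for illustration, not the kernel's list API.

/*
 * Illustrative sketch only: tail vs. head insertion into a circular
 * doubly-linked run list.  fake_task and runlist are hypothetical names.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	const char *name;
	struct fake_task *prev, *next;
};

/* Empty circular list: the head points at itself. */
static struct fake_task runlist = { "runlist", &runlist, &runlist };

static void enqueue(struct fake_task *p, bool head)
{
	if (head) {			/* insert right after the list head */
		p->prev = &runlist;
		p->next = runlist.next;
	} else {			/* insert before the head, i.e. at the tail */
		p->prev = runlist.prev;
		p->next = &runlist;
	}
	p->prev->next = p;
	p->next->prev = p;
}

int main(void)
{
	struct fake_task a = { "A" }, b = { "B" }, c = { "C" };

	enqueue(&a, false);		/* tail: A */
	enqueue(&b, false);		/* tail: A B */
	enqueue(&c, true);		/* head: C A B */

	for (struct fake_task *t = runlist.next; t != &runlist; t = t->next)
		printf("%s ", t->name);	/* prints "C A B " */
	printf("\n");
	return 0;
}

With head == false the behaviour is unchanged, which is why every caller
touched in the patch below simply passes false.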
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       | 13
-rw-r--r--  kernel/sched_fair.c  |  3
-rw-r--r--  kernel/sched_rt.c    |  3
3 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 41e76d325648..f47560ff3346 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1856,13 +1856,14 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (wakeup)
 		p->se.start_runtime = p->se.sum_exec_runtime;
 
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup);
+	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
 }
 
@@ -1892,7 +1893,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup);
+	enqueue_task(rq, p, wakeup, false);
 	inc_nr_running(rq);
 }
 
@@ -4236,7 +4237,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -4280,7 +4281,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -8230,7 +8231,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0);
+		enqueue_task(rq, tsk, 0, false);
 
 	task_rq_unlock(rq, &flags);
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 22231ccb2f98..0e7a7af9cf8b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1053,7 +1053,8 @@ static inline void hrtick_update(struct rq *rq)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 502bb614e40a..38076dabb44a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -878,7 +878,8 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
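The comment above ("Adding/removing a task to/from a priority array") is where
the flag eventually matters for SCHED_FIFO: within one priority level the
tasks form a FIFO list, and head queueing puts a task back at the front of
that list so it is chosen first. Here is a hedged user-space sketch of such a
per-priority array; rt_array, rt_task, NPRIO, enqueue_rt and pick_next are
invented names, not the actual sched_rt.c data structures.

/*
 * Hedged illustration only: a tiny per-priority FIFO array with optional
 * head insertion.  Not the kernel's rt_prio_array.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPRIO 4				/* tiny number of priority levels */

struct rt_task {
	const char *name;
	int prio;			/* 0 is the highest priority here */
	struct rt_task *next;		/* singly-linked FIFO per level */
};

struct rt_array {
	struct rt_task *head[NPRIO];
	struct rt_task *tail[NPRIO];
};

static void enqueue_rt(struct rt_array *a, struct rt_task *p, bool head)
{
	int q = p->prio;

	if (head || !a->head[q]) {	/* front of the level's FIFO list */
		p->next = a->head[q];
		a->head[q] = p;
		if (!a->tail[q])
			a->tail[q] = p;
	} else {			/* normal enqueue: back of the list */
		p->next = NULL;
		a->tail[q]->next = p;
		a->tail[q] = p;
	}
}

static struct rt_task *pick_next(struct rt_array *a)
{
	for (int q = 0; q < NPRIO; q++)	/* highest priority level wins */
		if (a->head[q])
			return a->head[q];
	return NULL;
}

int main(void)
{
	struct rt_array a = { { NULL }, { NULL } };
	struct rt_task t1 = { "t1", 1 }, t2 = { "t2", 1 }, t3 = { "t3", 1 };

	enqueue_rt(&a, &t1, false);	/* level 1: t1 */
	enqueue_rt(&a, &t2, false);	/* level 1: t1 t2 */
	enqueue_rt(&a, &t3, true);	/* head queueing: t3 t1 t2 */

	printf("next: %s\n", pick_next(&a)->name);	/* prints "next: t3" */
	return 0;
}

In the hunks shown here every caller still passes false; the flag only takes
effect once an enqueue path passes true.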