diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2010-01-20 15:58:57 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2010-01-22 12:09:59 -0500 |
commit | ea87bb7853168434f4a82426dd1ea8421f9e604d (patch) | |
tree | fcadec9b0fd1c57e4d1b625e2492c13c852972f1 /kernel/sched.c | |
parent | 7c9414385ebfdd87cc542d4e7e3bb0dbb2d3ce25 (diff) |
sched: Extend enqueue_task to allow head queueing
The ability of enqueueing a task to the head of a SCHED_FIFO priority
list is required to fix some violations of POSIX scheduling policy.
Extend the related functions with a "head" argument.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Carsten Emde <cbe@osadl.org>
Tested-by: Mathias Weber <mathias.weber.mw1@roche.com>
LKML-Reference: <20100120171629.734886007@linutronix.de>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 13 |
1 files changed, 7 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 41e76d325648..f47560ff3346 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1856,13 +1856,14 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (wakeup)
 		p->se.start_runtime = p->se.sum_exec_runtime;
 
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup);
+	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
 }
 
@@ -1892,7 +1893,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup);
+	enqueue_task(rq, p, wakeup, false);
 	inc_nr_running(rq);
 }
 
@@ -4236,7 +4237,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -4280,7 +4281,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -8230,7 +8231,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0);
+		enqueue_task(rq, tsk, 0, false);
 
 	task_rq_unlock(rq, &flags);
 }