author    Thomas Gleixner <tglx@linutronix.de>  2010-01-19 10:09:11 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2010-02-21 14:20:10 -0500
commit    6640be50e8a2f9bd526beddafd23b49406a4af07 (patch)
tree      3a2cef9f3b60fd9edeea265e3ec6226e421e356a /kernel/sched.c
parent    57bba6a44244a9c7cef5951041bdd3f747b28ed0 (diff)
sched: Extend enqueue_task to allow head queueing
The ability to enqueue a task at the head of a SCHED_FIFO priority list
is required to fix some violations of POSIX scheduling policy. Extend
the related functions with a "head" argument.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
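For illustration, here is a minimal userspace sketch (assumed names; not
kernel code) of the semantics the new "head" argument expresses: tasks
normally join a SCHED_FIFO priority list at the tail, but can now be
queued at the head instead.

	/*
	 * Userspace sketch only. enqueue_task_sketch and struct task are
	 * illustrative names, not the kernel's data structures.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct task {
		const char *name;
		struct task *next;
	};

	static void enqueue_task_sketch(struct task **queue, struct task *p,
					bool head)
	{
		if (head || !*queue) {
			/* head queueing: run before equal-priority peers */
			p->next = *queue;
			*queue = p;
			return;
		}
		/* FIFO default: append at the tail */
		struct task *t = *queue;
		while (t->next)
			t = t->next;
		t->next = p;
		p->next = NULL;
	}

	int main(void)
	{
		struct task a = { "A", NULL }, b = { "B", NULL }, c = { "C", NULL };
		struct task *queue = NULL;

		enqueue_task_sketch(&queue, &a, false);	/* A */
		enqueue_task_sketch(&queue, &b, false);	/* A B */
		enqueue_task_sketch(&queue, &c, true);	/* C A B */

		for (struct task *t = queue; t; t = t->next)
			printf("%s ", t->name);
		printf("\n");	/* prints: C A B */
		return 0;
	}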
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ba0ee0d34545..8da004bbc8db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1972,13 +1972,14 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (wakeup)
 		p->se.start_runtime = p->se.sum_exec_runtime;
 
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup);
+	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
 }
 
@@ -2056,7 +2057,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup);
+	enqueue_task(rq, p, wakeup, false);
 	inc_nr_running(rq);
 }
 
@@ -6389,7 +6390,7 @@ void task_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -6433,7 +6434,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -10522,7 +10523,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0);
+		enqueue_task(rq, tsk, 0, false);
 
 	task_rq_unlock(rq, &flags);
 }
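Every call site touched by this patch passes head = false, so behaviour
is unchanged here; the flag only takes effect once a scheduling class
consumes it. Below is a sketch under the assumption that a class maps
the flag onto the standard <linux/list.h> helpers; enqueue_entity_sketch
is an illustrative name, not the actual follow-up change.

	#include <linux/list.h>
	#include <linux/types.h>

	/* Sketch: queue an entity at the head or tail of its priority list. */
	static void enqueue_entity_sketch(struct list_head *queue,
					  struct list_head *run_list, bool head)
	{
		if (head)
			list_add(run_list, queue);	/* front: runs next among peers */
		else
			list_add_tail(run_list, queue);	/* FIFO default: back of the list */
	}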