aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_rt.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-01-17 11:03:27 -0500
committerIngo Molnar <mingo@elte.hu>2011-01-26 06:33:22 -0500
commitda7a735e51f9622eb3e1672594d4a41da01d7e4f (patch)
tree27623dcd39c52a80b79e0ee86ab426fc9c7e2b46 /kernel/sched_rt.c
parenta8941d7ec81678fb69aea7183338175f112f3e0d (diff)
sched: Fix switch_from_fair()
When a task is taken out of the fair class we must ensure the vruntime is properly normalized because when we put it back in it will assume to be normalized.

The case that goes wrong is when changing away from the fair class while sleeping. Sleeping tasks have non-normalized vruntime in order to make sleeper-fairness work. So treat the switch away from fair as a wakeup and preserve the relative vruntime.

Also update sysrq-n to call the ->switch_{to,from} methods.

Reported-by: Onkalo Samu <samu.p.onkalo@nokia.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--kernel/sched_rt.c19
1 files changed, 10 insertions, 9 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca6..c381fdc18c64 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1595,8 +1595,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
 	 * If there are other RT tasks then we will reschedule
@@ -1605,7 +1604,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!rq->rt.rt_nr_running)
+	if (p->se.on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1624,8 +1623,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
 	int check_resched = 1;
 
@@ -1636,7 +1634,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (!running) {
+	if (p->se.on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1652,10 +1650,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-			    int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (running) {
+	if (!p->se.on_rq)
+		return;
+
+	if (rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we