aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/rt.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	16
1 files changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5f6edca4fafd..4feac8fcb47f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1448,7 +1448,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	 * means a dl or stop task can slip in, in which case we need
 	 * to re-start task selection.
 	 */
-	if (unlikely((rq->stop && rq->stop->on_rq) ||
+	if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
 		     rq->dl.dl_nr_running))
 		return RETRY_TASK;
 	}
@@ -1624,7 +1624,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
-				     !task->on_rq)) {
+				     !task_on_rq_queued(task))) {

 				double_unlock_balance(rq, lowest_rq);
 				lowest_rq = NULL;
@@ -1658,7 +1658,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);

-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));

 	return p;
@@ -1809,7 +1809,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));

 			/*
 			 * There's a chance that p is higher in priority
@@ -1870,7 +1870,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,

 	BUG_ON(!rt_task(p));

-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;

 	weight = cpumask_weight(new_mask);
@@ -1936,7 +1936,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!p->on_rq || rq->rt.rt_nr_running)
+	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;

 	if (pull_rt_task(rq))
@@ -1970,7 +1970,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
 		    /* Don't resched if we changed runqueues */
@@ -1989,7 +1989,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;

 	if (rq->curr == p) {