-rw-r--r--  kernel/sched/core.c        82
-rw-r--r--  kernel/sched/deadline.c    15
-rw-r--r--  kernel/sched/fair.c        22
-rw-r--r--  kernel/sched/rt.c          16
-rw-r--r--  kernel/sched/sched.h        7
-rw-r--r--  kernel/sched/stop_task.c    2
6 files changed, 76 insertions(+), 68 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4f2826f46e95..a02b624fee6c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
          * A queue event has occurred, and we're going to schedule. In
          * this case, we can save a useless back to back clock update.
          */
-        if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
                 rq->skip_clock_update = 1;
 }
 
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-        if (p->on_rq) {
+        if (task_on_rq_queued(p)) {
                 struct rq *src_rq, *dst_rq;
 
                 src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
         unsigned long flags;
-        int running, on_rq;
+        int running, queued;
         unsigned long ncsw;
         struct rq *rq;
 
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 rq = task_rq_lock(p, &flags);
                 trace_sched_wait_task(p);
                 running = task_running(rq, p);
-                on_rq = p->on_rq;
+                queued = task_on_rq_queued(p);
                 ncsw = 0;
                 if (!match_state || p->state == match_state)
                         ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                  * running right now), it's preempted, and we should
                  * yield - it could be a while.
                  */
-                if (unlikely(on_rq)) {
+                if (unlikely(queued)) {
                         ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                         set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
         activate_task(rq, p, en_flags);
-        p->on_rq = 1;
+        p->on_rq = TASK_ON_RQ_QUEUED;
 
         /* if a worker is waking up, notify workqueue */
         if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
         int ret = 0;
 
         rq = __task_rq_lock(p);
-        if (p->on_rq) {
+        if (task_on_rq_queued(p)) {
                 /* check_preempt_curr() may use rq clock */
                 update_rq_clock(rq);
                 ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         success = 1; /* we're going to change ->state */
         cpu = task_cpu(p);
 
-        if (p->on_rq && ttwu_remote(p, wake_flags))
+        if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
                 goto stat;
 
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
         if (!(p->state & TASK_NORMAL))
                 goto out;
 
-        if (!p->on_rq)
+        if (!task_on_rq_queued(p))
                 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
         ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
         init_task_runnable_average(p);
         rq = __task_rq_lock(p);
         activate_task(rq, p, 0);
-        p->on_rq = 1;
+        p->on_rq = TASK_ON_RQ_QUEUED;
         trace_sched_wakeup_new(p, true);
         check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
          * project cycles that may never be accounted to this
          * thread, breaking clock_gettime().
          */
-        if (task_current(rq, p) && p->on_rq) {
+        if (task_current(rq, p) && task_on_rq_queued(p)) {
                 update_rq_clock(rq);
                 ns = rq_clock_task(rq) - p->se.exec_start;
                 if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
          * If we see ->on_cpu without ->on_rq, the task is leaving, and has
          * been accounted, so we're correct here as well.
          */
-        if (!p->on_cpu || !p->on_rq)
+        if (!p->on_cpu || !task_on_rq_queued(p))
                 return p->se.sum_exec_runtime;
 #endif
 
@@ -2794,7 +2794,7 @@ need_resched:
                 switch_count = &prev->nvcsw;
         }
 
-        if (prev->on_rq || rq->skip_clock_update < 0)
+        if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
                 update_rq_clock(rq);
 
         next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-        int oldprio, on_rq, running, enqueue_flag = 0;
+        int oldprio, queued, running, enqueue_flag = 0;
         struct rq *rq;
         const struct sched_class *prev_class;
 
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         trace_sched_pi_setprio(p, prio);
         oldprio = p->prio;
         prev_class = p->sched_class;
-        on_rq = p->on_rq;
+        queued = task_on_rq_queued(p);
         running = task_current(rq, p);
-        if (on_rq)
+        if (queued)
                 dequeue_task(rq, p, 0);
         if (running)
                 p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
         if (running)
                 p->sched_class->set_curr_task(rq);
-        if (on_rq)
+        if (queued)
                 enqueue_task(rq, p, enqueue_flag);
 
         check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-        int old_prio, delta, on_rq;
+        int old_prio, delta, queued;
         unsigned long flags;
         struct rq *rq;
 
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
                 p->static_prio = NICE_TO_PRIO(nice);
                 goto out_unlock;
         }
-        on_rq = p->on_rq;
-        if (on_rq)
+        queued = task_on_rq_queued(p);
+        if (queued)
                 dequeue_task(rq, p, 0);
 
         p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
         p->prio = effective_prio(p);
         delta = p->prio - old_prio;
 
-        if (on_rq) {
+        if (queued) {
                 enqueue_task(rq, p, 0);
                 /*
                  * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
         int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                       MAX_RT_PRIO - 1 - attr->sched_priority;
-        int retval, oldprio, oldpolicy = -1, on_rq, running;
+        int retval, oldprio, oldpolicy = -1, queued, running;
         int policy = attr->sched_policy;
         unsigned long flags;
         const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ change:
                 return 0;
         }
 
-        on_rq = p->on_rq;
+        queued = task_on_rq_queued(p);
         running = task_current(rq, p);
-        if (on_rq)
+        if (queued)
                 dequeue_task(rq, p, 0);
         if (running)
                 p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ change:
 
         if (running)
                 p->sched_class->set_curr_task(rq);
-        if (on_rq) {
+        if (queued) {
                 /*
                  * We enqueue to tail when the priority of a task is
                  * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
         rcu_read_unlock();
 
         rq->curr = rq->idle = idle;
-        idle->on_rq = 1;
+        idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
         idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                 goto out;
 
         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-        if (p->on_rq) {
+        if (task_on_rq_queued(p)) {
                 struct migration_arg arg = { p, dest_cpu };
                 /* Need help from migration thread: drop lock and wait. */
                 task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
          * If we're not on a rq, the next wake-up will ensure we're
          * placed properly.
          */
-        if (p->on_rq) {
+        if (task_on_rq_queued(p)) {
                 dequeue_task(rq_src, p, 0);
                 set_task_cpu(p, dest_cpu);
                 enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
         struct rq *rq;
         unsigned long flags;
-        bool on_rq, running;
+        bool queued, running;
 
         rq = task_rq_lock(p, &flags);
-        on_rq = p->on_rq;
+        queued = task_on_rq_queued(p);
         running = task_current(rq, p);
 
-        if (on_rq)
+        if (queued)
                 dequeue_task(rq, p, 0);
         if (running)
                 p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
         if (running)
                 p->sched_class->set_curr_task(rq);
-        if (on_rq)
+        if (queued)
                 enqueue_task(rq, p, 0);
         task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                 .sched_policy = SCHED_NORMAL,
         };
         int old_prio = p->prio;
-        int on_rq;
+        int queued;
 
-        on_rq = p->on_rq;
-        if (on_rq)
+        queued = task_on_rq_queued(p);
+        if (queued)
                 dequeue_task(rq, p, 0);
         __setscheduler(rq, p, &attr);
-        if (on_rq) {
+        if (queued) {
                 enqueue_task(rq, p, 0);
                 resched_curr(rq);
         }
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
         struct task_group *tg;
-        int on_rq, running;
+        int queued, running;
         unsigned long flags;
         struct rq *rq;
 
         rq = task_rq_lock(tsk, &flags);
 
         running = task_current(rq, tsk);
-        on_rq = tsk->on_rq;
+        queued = task_on_rq_queued(tsk);
 
-        if (on_rq)
+        if (queued)
                 dequeue_task(rq, tsk, 0);
         if (unlikely(running))
                 tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
         if (tsk->sched_class->task_move_group)
-                tsk->sched_class->task_move_group(tsk, on_rq);
+                tsk->sched_class->task_move_group(tsk, queued);
         else
 #endif
                 set_task_rq(tsk, task_cpu(tsk));
 
         if (unlikely(running))
                 tsk->sched_class->set_curr_task(rq);
-        if (on_rq)
+        if (queued)
                 enqueue_task(rq, tsk, 0);
 
         task_rq_unlock(rq, tsk, &flags);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 255ce138b652..d21a8e0259d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -530,7 +530,7 @@ again:
         update_rq_clock(rq);
         dl_se->dl_throttled = 0;
         dl_se->dl_yielded = 0;
-        if (p->on_rq) {
+        if (task_on_rq_queued(p)) {
                 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                 if (task_has_dl_policy(rq->curr))
                         check_preempt_curr_dl(rq, p, 0);
@@ -1030,7 +1030,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
                  * means a stop task can slip in, in which case we need to
                  * re-start task selection.
                  */
-                if (rq->stop && rq->stop->on_rq)
+                if (rq->stop && task_on_rq_queued(rq->stop))
                         return RETRY_TASK;
         }
 
@@ -1257,7 +1257,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                 if (unlikely(task_rq(task) != rq ||
                              !cpumask_test_cpu(later_rq->cpu,
                                                &task->cpus_allowed) ||
-                             task_running(rq, task) || !task->on_rq)) {
+                             task_running(rq, task) ||
+                             !task_on_rq_queued(task))) {
                         double_unlock_balance(rq, later_rq);
                         later_rq = NULL;
                         break;
@@ -1296,7 +1297,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
         BUG_ON(task_current(rq, p));
         BUG_ON(p->nr_cpus_allowed <= 1);
 
-        BUG_ON(!p->on_rq);
+        BUG_ON(!task_on_rq_queued(p));
         BUG_ON(!dl_task(p));
 
         return p;
@@ -1443,7 +1444,7 @@ static int pull_dl_task(struct rq *this_rq)
                      dl_time_before(p->dl.deadline,
                                     this_rq->dl.earliest_dl.curr))) {
                         WARN_ON(p == src_rq->curr);
-                        WARN_ON(!p->on_rq);
+                        WARN_ON(!task_on_rq_queued(p));
 
                         /*
                          * Then we pull iff p has actually an earlier
@@ -1596,7 +1597,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
         if (unlikely(p->dl.dl_throttled))
                 return;
 
-        if (p->on_rq && rq->curr != p) {
+        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
                         /* Only reschedule if pushing failed */
@@ -1614,7 +1615,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                             int oldprio)
 {
-        if (p->on_rq || rq->curr == p) {
+        if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
                 /*
                  * This might be too much, but unfortunately
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc37bb97159f..9e6ca0d88f51 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7494,7 +7494,7 @@ static void task_fork_fair(struct task_struct *p)
 static void
 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
-        if (!p->on_rq)
+        if (!task_on_rq_queued(p))
                 return;
 
         /*
@@ -7519,11 +7519,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
          * switched back to the fair class the enqueue_entity(.flags=0) will
          * do the right thing.
          *
-         * If it's on_rq, then the dequeue_entity(.flags=0) will already
-         * have normalized the vruntime, if it's !on_rq, then only when
+         * If it's queued, then the dequeue_entity(.flags=0) will already
+         * have normalized the vruntime, if it's !queued, then only when
          * the task is sleeping will it still have non-normalized vruntime.
          */
-        if (!p->on_rq && p->state != TASK_RUNNING) {
+        if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
                 /*
                  * Fix up our vruntime so that the current sleep doesn't
                  * cause 'unlimited' sleep bonus.
@@ -7558,7 +7558,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
          */
         se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
-        if (!p->on_rq)
+        if (!task_on_rq_queued(p))
                 return;
 
         /*
@@ -7604,7 +7604,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int queued)
 {
         struct sched_entity *se = &p->se;
         struct cfs_rq *cfs_rq;
@@ -7623,7 +7623,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
          * fair sleeper stuff for the first placement, but who cares.
          */
         /*
-         * When !on_rq, vruntime of the task has usually NOT been normalized.
+         * When !queued, vruntime of the task has usually NOT been normalized.
          * But there are some cases where it has already been normalized:
          *
          * - Moving a forked child which is waiting for being woken up by
@@ -7634,14 +7634,14 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
          * To prevent boost or penalty in the new cfs_rq caused by delta
          * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
          */
-        if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-                on_rq = 1;
+        if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
+                queued = 1;
 
-        if (!on_rq)
+        if (!queued)
                 se->vruntime -= cfs_rq_of(se)->min_vruntime;
         set_task_rq(p, task_cpu(p));
         se->depth = se->parent ? se->parent->depth + 1 : 0;
-        if (!on_rq) {
+        if (!queued) {
                 cfs_rq = cfs_rq_of(se);
                 se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5f6edca4fafd..4feac8fcb47f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1448,7 +1448,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
                  * means a dl or stop task can slip in, in which case we need
                  * to re-start task selection.
                  */
-                if (unlikely((rq->stop && rq->stop->on_rq) ||
+                if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
                              rq->dl.dl_nr_running))
                         return RETRY_TASK;
         }
@@ -1624,7 +1624,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                              !cpumask_test_cpu(lowest_rq->cpu,
                                                tsk_cpus_allowed(task)) ||
                              task_running(rq, task) ||
-                             !task->on_rq)) {
+                             !task_on_rq_queued(task))) {
 
                         double_unlock_balance(rq, lowest_rq);
                         lowest_rq = NULL;
@@ -1658,7 +1658,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
         BUG_ON(task_current(rq, p));
         BUG_ON(p->nr_cpus_allowed <= 1);
 
-        BUG_ON(!p->on_rq);
+        BUG_ON(!task_on_rq_queued(p));
         BUG_ON(!rt_task(p));
 
         return p;
@@ -1809,7 +1809,7 @@ static int pull_rt_task(struct rq *this_rq)
                  */
                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                         WARN_ON(p == src_rq->curr);
-                        WARN_ON(!p->on_rq);
+                        WARN_ON(!task_on_rq_queued(p));
 
                         /*
                          * There's a chance that p is higher in priority
@@ -1870,7 +1870,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 
         BUG_ON(!rt_task(p));
 
-        if (!p->on_rq)
+        if (!task_on_rq_queued(p))
                 return;
 
         weight = cpumask_weight(new_mask);
@@ -1936,7 +1936,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
          * we may need to handle the pulling of RT tasks
          * now.
          */
-        if (!p->on_rq || rq->rt.rt_nr_running)
+        if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
                 return;
 
         if (pull_rt_task(rq))
@@ -1970,7 +1970,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
          * If that current running task is also an RT task
          * then see if we can move to another run queue.
          */
-        if (p->on_rq && rq->curr != p) {
+        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
                     /* Don't resched if we changed runqueues */
@@ -1989,7 +1989,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-        if (!p->on_rq)
+        if (!task_on_rq_queued(p))
                 return;
 
         if (rq->curr == p) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c2b87fd5f52..26566d0c67ac 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -15,6 +15,9 @@
 
 struct rq;
 
+/* task_struct::on_rq states: */
+#define TASK_ON_RQ_QUEUED        1
+
 extern __read_mostly int scheduler_running;
 
 extern unsigned long calc_load_update;
@@ -942,6 +945,10 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #endif
 }
 
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+        return p->on_rq == TASK_ON_RQ_QUEUED;
+}
 
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next) do { } while (0)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index bfe0edadbfbb..67426e529f59 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -28,7 +28,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 {
         struct task_struct *stop = rq->stop;
 
-        if (!stop || !stop->on_rq)
+        if (!stop || !task_on_rq_queued(stop))
                 return NULL;
 
         put_prev_task(rq, prev);
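
For reference, a minimal illustrative sketch (not part of the patch) of the change-and-requeue pattern that this commit converts from open-coded p->on_rq tests to the new task_on_rq_queued() helper; the helpers and types are the kernel's own, while the wrapper function name change_task_attr() is hypothetical:

static void change_task_attr(struct rq *rq, struct task_struct *p)
{
        /* Snapshot queue/running state before touching attributes. */
        int queued = task_on_rq_queued(p); /* p->on_rq == TASK_ON_RQ_QUEUED */
        int running = task_current(rq, p);

        if (queued)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);

        /* ... change priority, policy or group here ... */

        if (running)
                p->sched_class->set_curr_task(rq);
        if (queued)
                enqueue_task(rq, p, 0);
}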