path: root/kernel/sched/core.c
authorKirill Tkhai <ktkhai@parallels.com>2014-08-20 05:47:32 -0400
committerIngo Molnar <mingo@kernel.org>2014-08-20 08:52:59 -0400
commitda0c1e65b51a289540159663aa4b90ba2366bc21 (patch)
treeed3da6438c901a5b51eaf5ed57f94b56c271572a /kernel/sched/core.c
parentf36c019c79edb3a89920afae1b2b45987af1a112 (diff)
sched: Add wrapper for checking task_struct::on_rq
Implement task_on_rq_queued() and use it everywhere instead of the open-coded
on_rq check. No functional changes.

The only exception is check_for_tasks(), where we do not use the wrapper
because that would require exporting task_on_rq_queued() in global header
files. The next patch in the series brings it back, so we do not move it back
and forth here.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  |  82
1 file changed, 41 insertions(+), 41 deletions(-)
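
The diff below only covers kernel/sched/core.c, so the wrapper itself is not shown; it is added to kernel/sched/sched.h by this patch. A minimal sketch of its likely shape, inferred from the TASK_ON_RQ_QUEUED value used throughout this file:

    /* Sketch of the helper this patch adds to kernel/sched/sched.h
     * (struct task_struct is already visible there via <linux/sched.h>).
     *
     * task_struct::on_rq states:
     */
    #define TASK_ON_RQ_QUEUED	1

    static inline int task_on_rq_queued(struct task_struct *p)
    {
    	/* true iff the task is currently enqueued on a runqueue */
    	return p->on_rq == TASK_ON_RQ_QUEUED;
    }

With this helper in place, every open-coded p->on_rq test in core.c is replaced by task_on_rq_queued(), and the direct assignments p->on_rq = 1 become p->on_rq = TASK_ON_RQ_QUEUED. Behaviour is unchanged; the wrapper presumably leaves room for additional ->on_rq states in later patches.
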
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4f2826f46e95..a02b624fee6c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * A queue event has occurred, and we're going to schedule. In
         * this case, we can save a useless back to back clock update.
         */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+       if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
                rq->skip_clock_update = 1;
 }
 
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                struct rq *src_rq, *dst_rq;
 
                src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
        unsigned long flags;
-       int running, on_rq;
+       int running, queued;
        unsigned long ncsw;
        struct rq *rq;
 
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
-               on_rq = p->on_rq;
+               queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
-               if (unlikely(on_rq)) {
+               if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        int ret = 0;
 
        rq = __task_rq_lock(p);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
-       if (p->on_rq && ttwu_remote(p, wake_flags))
+       if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
                goto stat;
 
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       if (!p->on_rq)
+       if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
        init_task_runnable_average(p);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p, true);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
         * project cycles that may never be accounted to this
         * thread, breaking clock_gettime().
         */
-       if (task_current(rq, p) && p->on_rq) {
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
                update_rq_clock(rq);
                ns = rq_clock_task(rq) - p->se.exec_start;
                if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
         * been accounted, so we're correct here as well.
         */
-       if (!p->on_cpu || !p->on_rq)
+       if (!p->on_cpu || !task_on_rq_queued(p))
                return p->se.sum_exec_runtime;
 #endif
 
@@ -2794,7 +2794,7 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       if (prev->on_rq || rq->skip_clock_update < 0)
+       if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, on_rq, running, enqueue_flag = 0;
+       int oldprio, queued, running, enqueue_flag = 0;
        struct rq *rq;
        const struct sched_class *prev_class;
 
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, enqueue_flag);
 
        check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-       int old_prio, delta, on_rq;
+       int old_prio, delta, queued;
        unsigned long flags;
        struct rq *rq;
 
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
 
        p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                        MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, on_rq, running;
+       int retval, oldprio, oldpolicy = -1, queued, running;
        int policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ change:
                return 0;
        }
 
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ change:
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (queued) {
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
-       idle->on_rq = 1;
+       idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
        idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                goto out;
 
        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                dequeue_task(rq_src, p, 0);
                set_task_cpu(p, dest_cpu);
                enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
        struct rq *rq;
        unsigned long flags;
-       bool on_rq, running;
+       bool queued, running;
 
        rq = task_rq_lock(p, &flags);
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, 0);
        task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                .sched_policy = SCHED_NORMAL,
        };
        int old_prio = p->prio;
-       int on_rq;
+       int queued;
 
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
        __setscheduler(rq, p, &attr);
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                resched_curr(rq);
        }
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
-       int on_rq, running;
+       int queued, running;
        unsigned long flags;
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &flags);
 
        running = task_current(rq, tsk);
-       on_rq = tsk->on_rq;
+       queued = task_on_rq_queued(tsk);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, on_rq);
+               tsk->sched_class->task_move_group(tsk, queued);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, tsk, &flags);