author		Kirill Tkhai <ktkhai@parallels.com>	2014-08-20 05:47:32 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-08-20 08:52:59 -0400
commit		da0c1e65b51a289540159663aa4b90ba2366bc21 (patch)
tree		ed3da6438c901a5b51eaf5ed57f94b56c271572a /kernel/sched/deadline.c
parent		f36c019c79edb3a89920afae1b2b45987af1a112 (diff)
sched: Add wrapper for checking task_struct::on_rq
Implement task_on_rq_queued() and use it everywhere instead of
the open-coded on_rq check. No functional changes.

The only exception is check_for_tasks(), where we do not use the
wrapper because it would require exporting task_on_rq_queued()
in global header files. The next patch in the series will convert
it as well, so we avoid moving the helper back and forth.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/deadline.c')
 kernel/sched/deadline.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
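The wrapper itself is added in kernel/sched/sched.h, which falls outside this diffstat (limited to deadline.c). Going by the commit message, a minimal sketch of the helper, assuming the TASK_ON_RQ_QUEUED state constant this series introduces, could look like this:

/*
 * Hypothetical sketch of the wrapper, not part of the diff below:
 * the real definition lives in kernel/sched/sched.h, outside this
 * diffstat. TASK_ON_RQ_QUEUED is an assumption here; while on_rq
 * only ever holds 0 or 1, comparing against it is equivalent to the
 * old boolean test, which is why the patch has no functional changes.
 */
#define TASK_ON_RQ_QUEUED	1

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

Each hunk below then replaces a direct p->on_rq test with this helper.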
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 255ce138b652..d21a8e0259d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -530,7 +530,7 @@ again:
 	update_rq_clock(rq);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 		if (task_has_dl_policy(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
@@ -1030,7 +1030,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		 * means a stop task can slip in, in which case we need to
 		 * re-start task selection.
 		 */
-		if (rq->stop && rq->stop->on_rq)
+		if (rq->stop && task_on_rq_queued(rq->stop))
 			return RETRY_TASK;
 	}
 
@@ -1257,7 +1257,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		if (unlikely(task_rq(task) != rq ||
 			     !cpumask_test_cpu(later_rq->cpu,
 					       &task->cpus_allowed) ||
-			     task_running(rq, task) || !task->on_rq)) {
+			     task_running(rq, task) ||
+			     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
 			break;
@@ -1296,7 +1297,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
 
 	return p;
@@ -1443,7 +1444,7 @@ static int pull_dl_task(struct rq *this_rq)
 		     dl_time_before(p->dl.deadline,
 				    this_rq->dl.earliest_dl.curr))) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));
 
 			/*
 			 * Then we pull iff p has actually an earlier
@@ -1596,7 +1597,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 	if (unlikely(p->dl.dl_throttled))
 		return;
 
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
 			/* Only reschedule if pushing failed */
@@ -1614,7 +1615,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 			    int oldprio)
 {
-	if (p->on_rq || rq->curr == p) {
+	if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * This might be too much, but unfortunately