author     Kirill Tkhai <tkhai@yandex.ru>      2015-02-16 07:38:34 -0500
committer  Ingo Molnar <mingo@kernel.org>      2015-02-18 08:27:31 -0500
commit     a79ec89fd8459f0de850898f432a2a57d60e64de (patch)
tree       23b42d39bfc96342d22cd55dade4fef1c52f89ef /kernel
parent     3960c8c0c7891dfc0f7be687cbdabb0d6916d614 (diff)
sched/dl: Prevent enqueue of a sleeping task in dl_task_timer()
A deadline task may be throttled and dequeued at the same time. This
happens when it becomes throttled in schedule(), which is called to go
to sleep:

    current->state = TASK_INTERRUPTIBLE;
    schedule()
        deactivate_task()
            dequeue_task_dl()
                update_curr_dl()
                    start_dl_timer()
                __dequeue_task_dl()
        prev->on_rq = 0;

Later the timer fires, but the task is still dequeued:

    dl_task_timer()
        enqueue_task_dl()	/* queues on dl_rq; on_rq remains 0 */

Someone wakes it up:

    try_to_wake_up()
        enqueue_dl_entity()
            BUG_ON(on_dl_rq())

This patch fixes the problem by preventing !on_rq tasks from being
enqueued on the dl_rq.

Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Wrote comment. ]
Cc: Juri Lelli <juri.lelli@arm.com>
Fixes: 1019a359d3dc ("sched/deadline: Fix stale yield state")
Link: http://lkml.kernel.org/r/1374601424090314@web4j.yandex.ru
Signed-off-by: Ingo Molnar <mingo@kernel.org>
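For illustration only, below is a minimal stand-alone user-space model of the
race, not kernel code: the struct, helpers, and the with_fix flag are
simplified assumptions that merely borrow the kernel names. It shows why
enqueueing from the timer while on_rq == 0 lets a later wakeup trip
BUG_ON(on_dl_rq()), and why skipping the enqueue for !on_rq tasks avoids it.

    /* Toy model of the dl_task_timer() race; all names are stand-ins. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_task {
    	bool on_rq;      /* models task_on_rq_queued()  */
    	bool on_dl_rq;   /* dl entity queued on the dl_rq */
    	bool throttled;  /* models dl_se->dl_throttled   */
    };

    /* Models enqueue_dl_entity(): enqueueing twice is a bug. */
    static void toy_enqueue_dl_entity(struct toy_task *p)
    {
    	assert(!p->on_dl_rq);   /* mirrors BUG_ON(on_dl_rq()) */
    	p->on_dl_rq = true;
    }

    /* Models dl_task_timer(); with_fix selects the patched behaviour. */
    static void toy_dl_task_timer(struct toy_task *p, bool with_fix)
    {
    	p->throttled = false;           /* "replenish" the entity      */
    	if (with_fix && !p->on_rq)
    		return;                 /* wait for the wakeup to enqueue */
    	toy_enqueue_dl_entity(p);       /* unpatched: enqueue anyway   */
    }

    /* Models try_to_wake_up() -> enqueue_task_dl(). */
    static void toy_try_to_wake_up(struct toy_task *p)
    {
    	toy_enqueue_dl_entity(p);
    	p->on_rq = true;
    }

    int main(void)
    {
    	/* schedule() has throttled and dequeued the sleeping task. */
    	struct toy_task p = { .on_rq = false, .on_dl_rq = false,
    			      .throttled = true };

    	toy_dl_task_timer(&p, true);    /* timer fires: replenish only  */
    	toy_try_to_wake_up(&p);         /* wakeup enqueues exactly once */
    	printf("on_rq=%d on_dl_rq=%d\n", p.on_rq, p.on_dl_rq);

    	/*
    	 * Passing with_fix = false would enqueue from the timer while
    	 * on_rq is still 0, and the later wakeup would hit the assert,
    	 * mirroring the reported BUG_ON.
    	 */
    	return 0;
    }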
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched/deadline.c    20
1 file changed, 20 insertions, 0 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e88847d9fc6a..9908c950d776 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -535,6 +535,26 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 
 	sched_clock_tick();
 	update_rq_clock(rq);
+
+	/*
+	 * If the throttle happened during sched-out; like:
+	 *
+	 *   schedule()
+	 *     deactivate_task()
+	 *       dequeue_task_dl()
+	 *         update_curr_dl()
+	 *           start_dl_timer()
+	 *         __dequeue_task_dl()
+	 *     prev->on_rq = 0;
+	 *
+	 * We can be both throttled and !queued. Replenish the counter
+	 * but do not enqueue -- wait for our wakeup to do that.
+	 */
+	if (!task_on_rq_queued(p)) {
+		replenish_dl_entity(dl_se, dl_se);
+		goto unlock;
+	}
+
 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 	if (dl_task(rq->curr))
 		check_preempt_curr_dl(rq, p, 0);