commit 1019a359d3dc4b64d0e1e5a5efcb725d5e83994d
tree e3ea8601967f078e09dc436670cf8dfcd903311d
parent a7bebf488791aa1036f3e6629daf01d01f705dcb
author Peter Zijlstra <peterz@infradead.org> 2014-11-25 19:44:03 -0500
committer Ingo Molnar <mingo@kernel.org> 2015-02-04 01:52:26 -0500
sched/deadline: Fix stale yield state
When we fail to start the deadline timer in update_curr_dl(), we
forget to clear ->dl_yielded, resulting in wrecked time keeping.

The natural place to clear both ->dl_yielded and ->dl_throttled is
replenish_dl_entity(); both flags are, after all, waiting for that
event. Make it so.
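
The accounting rule the patch protects is the one visible in
update_curr_dl() below: runtime is only charged while ->dl_yielded is
clear (dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec). As a
minimal userspace model of the two flags (toy code, all names invented
here, not the kernel implementation), the following shows how a flag
that survives replenishment stops the clock for good:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for the two bits of sched_dl_entity state at issue. */
struct toy_dl_se {
	bool dl_yielded;
	bool dl_throttled;
	long runtime;
};

/* Post-patch rule: replenishment is the one event both flags wait on,
 * so clear them here. */
static void replenish(struct toy_dl_se *se, long budget)
{
	se->runtime = budget;
	se->dl_yielded = false;
	se->dl_throttled = false;
}

/* Model of update_curr_dl(): a yielded entity is charged nothing. */
static void update_curr(struct toy_dl_se *se, long delta)
{
	se->runtime -= se->dl_yielded ? 0 : delta;
}

int main(void)
{
	struct toy_dl_se se = { .dl_yielded = true, .runtime = 5 };

	/* Stale flag, as on the pre-patch failure path: nothing charged. */
	update_curr(&se, 3);
	printf("stale yield: runtime=%ld (not charged -- the bug)\n", se.runtime);

	/* Post-patch: replenish clears the flag, charging resumes. */
	replenish(&se, 5);
	update_curr(&se, 3);
	printf("after replenish: runtime=%ld (charged again)\n", se.runtime);
	return 0;
}

Compiled with gcc this prints runtime=5 (unchanged) before the
replenish and runtime=2 after, mirroring the before/after behaviour the
fix restores.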
Luckily, since commit 67dfa1b756f2 ("sched/deadline: Implement
cancel_dl_timer() to use in switched_from_dl()") the
task_on_rq_queued() condition in dl_task_timer() must be true, so the
timer callback can call enqueue_task_dl() unconditionally.
Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1416962647-76792-4-git-send-email-wanpeng.li@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/deadline.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 7b684f9341a5..a027799ae130 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 		dl_se->runtime = pi_se->dl_runtime;
 	}
+
+	if (dl_se->dl_yielded)
+		dl_se->dl_yielded = 0;
+	if (dl_se->dl_throttled)
+		dl_se->dl_throttled = 0;
 }
 
 /*
@@ -536,23 +541,19 @@ again:
 
 	sched_clock_tick();
 	update_rq_clock(rq);
-	dl_se->dl_throttled = 0;
-	dl_se->dl_yielded = 0;
-	if (task_on_rq_queued(p)) {
-		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-		if (dl_task(rq->curr))
-			check_preempt_curr_dl(rq, p, 0);
-		else
-			resched_curr(rq);
+	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+	if (dl_task(rq->curr))
+		check_preempt_curr_dl(rq, p, 0);
+	else
+		resched_curr(rq);
 #ifdef CONFIG_SMP
-		/*
-		 * Queueing this task back might have overloaded rq,
-		 * check if we need to kick someone away.
-		 */
-		if (has_pushable_dl_tasks(rq))
-			push_dl_task(rq);
+	/*
+	 * Queueing this task back might have overloaded rq,
+	 * check if we need to kick someone away.
+	 */
+	if (has_pushable_dl_tasks(rq))
+		push_dl_task(rq);
 #endif
-	}
 unlock:
 	raw_spin_unlock(&rq->lock);
 
@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
 
 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 	if (dl_runtime_exceeded(rq, dl_se)) {
+		dl_se->dl_throttled = 1;
 		__dequeue_task_dl(rq, curr, 0);
-		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
-			dl_se->dl_throttled = 1;
-		else
+		if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
 		if (!is_leftmost(curr, &rq->dl))
@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	 * its rq, the bandwidth timer callback (which clearly has not
 	 * run yet) will take care of this.
 	 */
-	if (p->dl.dl_throttled)
+	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 		return;
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
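
For context, the yield state in question is driven from userspace: a
SCHED_DEADLINE task that calls sched_yield() hands back the remainder
of its budget and has ->dl_yielded set until the next replenishment.
A sketch of such a task follows; it is purely illustrative (not the
reporter's reproducer) and assumes the raw sched_setattr() syscall
number is available via <sys/syscall.h>, since the glibc of that era
shipped neither a wrapper nor a struct sched_attr definition.

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Local copy of the uapi struct; older libcs do not ship it. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10ULL * 1000 * 1000;	/* 10 ms */
	attr.sched_deadline = 30ULL * 1000 * 1000;	/* 30 ms */
	attr.sched_period   = 30ULL * 1000 * 1000;	/* 30 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	for (;;) {
		/* Hand the remaining budget back; this sets ->dl_yielded
		 * and throttles the task until the next period -- the
		 * state this patch keeps consistent. */
		sched_yield();
	}
}

A task of this shape exercises the throttle/yield flags every period,
which is exactly the path the patch makes self-consistent.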