diff options
| -rw-r--r-- | kernel/sched/deadline.c | 38 |
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 7b684f9341a5..a027799ae130 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
| @@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, | |||
| 350 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; | 350 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; |
| 351 | dl_se->runtime = pi_se->dl_runtime; | 351 | dl_se->runtime = pi_se->dl_runtime; |
| 352 | } | 352 | } |
| 353 | |||
| 354 | if (dl_se->dl_yielded) | ||
| 355 | dl_se->dl_yielded = 0; | ||
| 356 | if (dl_se->dl_throttled) | ||
| 357 | dl_se->dl_throttled = 0; | ||
| 353 | } | 358 | } |
| 354 | 359 | ||
| 355 | /* | 360 | /* |
| @@ -536,23 +541,19 @@ again: | |||
| 536 | 541 | ||
| 537 | sched_clock_tick(); | 542 | sched_clock_tick(); |
| 538 | update_rq_clock(rq); | 543 | update_rq_clock(rq); |
| 539 | dl_se->dl_throttled = 0; | 544 | enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); |
| 540 | dl_se->dl_yielded = 0; | 545 | if (dl_task(rq->curr)) |
| 541 | if (task_on_rq_queued(p)) { | 546 | check_preempt_curr_dl(rq, p, 0); |
| 542 | enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); | 547 | else |
| 543 | if (dl_task(rq->curr)) | 548 | resched_curr(rq); |
| 544 | check_preempt_curr_dl(rq, p, 0); | ||
| 545 | else | ||
| 546 | resched_curr(rq); | ||
| 547 | #ifdef CONFIG_SMP | 549 | #ifdef CONFIG_SMP |
| 548 | /* | 550 | /* |
| 549 | * Queueing this task back might have overloaded rq, | 551 | * Queueing this task back might have overloaded rq, |
| 550 | * check if we need to kick someone away. | 552 | * check if we need to kick someone away. |
| 551 | */ | 553 | */ |
| 552 | if (has_pushable_dl_tasks(rq)) | 554 | if (has_pushable_dl_tasks(rq)) |
| 553 | push_dl_task(rq); | 555 | push_dl_task(rq); |
| 554 | #endif | 556 | #endif |
| 555 | } | ||
| 556 | unlock: | 557 | unlock: |
| 557 | raw_spin_unlock(&rq->lock); | 558 | raw_spin_unlock(&rq->lock); |
| 558 | 559 | ||
| @@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq) | |||
| 613 | 614 | ||
| 614 | dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; | 615 | dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; |
| 615 | if (dl_runtime_exceeded(rq, dl_se)) { | 616 | if (dl_runtime_exceeded(rq, dl_se)) { |
| 617 | dl_se->dl_throttled = 1; | ||
| 616 | __dequeue_task_dl(rq, curr, 0); | 618 | __dequeue_task_dl(rq, curr, 0); |
| 617 | if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted))) | 619 | if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted))) |
| 618 | dl_se->dl_throttled = 1; | ||
| 619 | else | ||
| 620 | enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); | 620 | enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); |
| 621 | 621 | ||
| 622 | if (!is_leftmost(curr, &rq->dl)) | 622 | if (!is_leftmost(curr, &rq->dl)) |
| @@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
| 853 | * its rq, the bandwidth timer callback (which clearly has not | 853 | * its rq, the bandwidth timer callback (which clearly has not |
| 854 | * run yet) will take care of this. | 854 | * run yet) will take care of this. |
| 855 | */ | 855 | */ |
| 856 | if (p->dl.dl_throttled) | 856 | if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) |
| 857 | return; | 857 | return; |
| 858 | 858 | ||
| 859 | enqueue_dl_entity(&p->dl, pi_se, flags); | 859 | enqueue_dl_entity(&p->dl, pi_se, flags); |
