Diffstat (limited to 'kernel/sched/deadline.c')
 kernel/sched/deadline.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 52 insertions(+), 32 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 726470d47f87..3fa8fa6d9403 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                 dl_se->runtime = pi_se->dl_runtime;
         }
+
+        if (dl_se->dl_yielded)
+                dl_se->dl_yielded = 0;
+        if (dl_se->dl_throttled)
+                dl_se->dl_throttled = 0;
 }
 
 /*
@@ -506,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                                                      struct sched_dl_entity,
                                                      dl_timer);
         struct task_struct *p = dl_task_of(dl_se);
+        unsigned long flags;
         struct rq *rq;
-again:
-        rq = task_rq(p);
-        raw_spin_lock(&rq->lock);
 
-        if (rq != task_rq(p)) {
-                /* Task was moved, retrying. */
-                raw_spin_unlock(&rq->lock);
-                goto again;
-        }
+        rq = task_rq_lock(current, &flags);
 
         /*
          * We need to take care of several possible races here:
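Aside, for illustration only (not part of the patch): the removed again: loop is the usual "snapshot, lock, re-check, retry" idiom for locking a task's current runqueue, and task_rq_lock() wraps the same idiom while additionally taking the task's pi_lock before the rq lock, which the open-coded loop did not. A minimal user-space sketch of that pattern, with hypothetical names (worker, queue, worker_queue_lock):

#include <pthread.h>
#include <stdatomic.h>

struct queue {
        pthread_mutex_t lock;
        /* ... queue contents ... */
};

struct worker {
        _Atomic(struct queue *) q;      /* re-pointed when the worker migrates */
};

/*
 * Lock the worker's current queue. This assumes, as the kernel does for
 * rq->lock, that moving a worker to another queue requires holding the old
 * queue's lock, so a pointer that is still the same after locking cannot
 * go stale while we hold that lock.
 */
static struct queue *worker_queue_lock(struct worker *w)
{
        struct queue *q;

        for (;;) {
                q = atomic_load(&w->q);                 /* snapshot */
                pthread_mutex_lock(&q->lock);
                if (q == atomic_load(&w->q))            /* still ours? */
                        return q;                       /* locked the right queue */
                pthread_mutex_unlock(&q->lock);         /* it moved; retry */
        }
}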
@@ -536,25 +535,41 @@ again:
 
         sched_clock_tick();
         update_rq_clock(rq);
-        dl_se->dl_throttled = 0;
-        dl_se->dl_yielded = 0;
-        if (task_on_rq_queued(p)) {
-                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-                if (dl_task(rq->curr))
-                        check_preempt_curr_dl(rq, p, 0);
-                else
-                        resched_curr(rq);
+
+        /*
+         * If the throttle happened during sched-out; like:
+         *
+         *   schedule()
+         *     deactivate_task()
+         *       dequeue_task_dl()
+         *         update_curr_dl()
+         *           start_dl_timer()
+         *         __dequeue_task_dl()
+         *     prev->on_rq = 0;
+         *
+         * We can be both throttled and !queued. Replenish the counter
+         * but do not enqueue -- wait for our wakeup to do that.
+         */
+        if (!task_on_rq_queued(p)) {
+                replenish_dl_entity(dl_se, dl_se);
+                goto unlock;
+        }
+
+        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+        if (dl_task(rq->curr))
+                check_preempt_curr_dl(rq, p, 0);
+        else
+                resched_curr(rq);
 #ifdef CONFIG_SMP
         /*
          * Queueing this task back might have overloaded rq,
          * check if we need to kick someone away.
          */
         if (has_pushable_dl_tasks(rq))
                 push_dl_task(rq);
 #endif
-        }
 unlock:
-        raw_spin_unlock(&rq->lock);
+        task_rq_unlock(rq, current, &flags);
 
         return HRTIMER_NORESTART;
 }
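For illustration only (not kernel code): a reduced model of the two branches the timer callback now takes, following the new comment about being throttled during sched-out. The struct and helpers are hypothetical stand-ins for sched_dl_entity and the real enqueue path.

#include <stdbool.h>

struct dl_model {
        bool throttled;         /* budget exhausted, replenishment timer armed */
        bool queued;            /* task currently on its runqueue */
        long long runtime;      /* remaining budget */
        long long deadline;     /* absolute deadline */
};

static void model_replenish(struct dl_model *dl, long long now,
                            long long period, long long budget)
{
        dl->deadline  = now + period;
        dl->runtime   = budget;
        dl->throttled = false;  /* cleared on replenish, as in the first hunk */
}

/* What the timer callback does when it fires, per the hunk above. */
static void model_timer(struct dl_model *dl, long long now,
                        long long period, long long budget)
{
        if (!dl->queued) {
                /*
                 * Throttled during sched-out: refill the budget but leave
                 * queueing to the task's eventual wakeup; the task is not
                 * on a runqueue, so enqueueing it here would be wrong.
                 */
                model_replenish(dl, now, period, budget);
                return;
        }

        /* Normal case: refill and put the task back among the runnable ones. */
        model_replenish(dl, now, period, budget);
        dl->queued = true;      /* stands in for enqueue_task_dl(ENQUEUE_REPLENISH) */
}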
@@ -613,10 +628,9 @@ static void update_curr_dl(struct rq *rq)
 
         dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
         if (dl_runtime_exceeded(rq, dl_se)) {
+                dl_se->dl_throttled = 1;
                 __dequeue_task_dl(rq, curr, 0);
-                if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
-                        dl_se->dl_throttled = 1;
-                else
+                if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
                 if (!is_leftmost(curr, &rq->dl))
@@ -853,7 +867,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
          * its rq, the bandwidth timer callback (which clearly has not
          * run yet) will take care of this.
          */
-        if (p->dl.dl_throttled)
+        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
                 return;
 
         enqueue_dl_entity(&p->dl, pi_se, flags);
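The replenish_dl_entity() hunk above moves the clearing of dl_throttled (and dl_yielded) into the replenishment itself, so the timer callback now reaches enqueue_task_dl() while dl_throttled is still set; without the ENQUEUE_REPLENISH exception, the replenishing enqueue would hit its own early return. A condensed, illustrative view of the check (hypothetical helper and flag stand-in, not kernel code):

#define ENQUEUE_REPLENISH_FLAG  0x01    /* stand-in for ENQUEUE_REPLENISH */

static int should_skip_enqueue(int dl_throttled, int flags)
{
        /*
         * A throttled task must not be enqueued by ordinary wakeups -- its
         * bandwidth timer will do that -- but the timer's own replenishing
         * enqueue has to be let through, because dl_throttled is only
         * cleared later, inside replenish_dl_entity().
         */
        return dl_throttled && !(flags & ENQUEUE_REPLENISH_FLAG);
}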
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
                 rq->curr->dl.dl_yielded = 1;
                 p->dl.runtime = 0;
         }
+        update_rq_clock(rq);
         update_curr_dl(rq);
 }
 
@@ -1073,7 +1088,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
         update_curr_dl(rq);
 
-        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+        /*
+         * Even when we have runtime, update_curr_dl() might have resulted in us
+         * not being the leftmost task anymore. In that case NEED_RESCHED will
+         * be set and schedule() will start a new hrtick for the next task.
+         */
+        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+            is_leftmost(p, &rq->dl))
                 start_hrtick_dl(rq, p);
 }
 
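For illustration only: the new re-arm condition in miniature. The names below are hypothetical; "head" stands in for the leftmost (earliest-deadline) task that is_leftmost() compares against.

#include <stdbool.h>

struct tick_ctx {
        bool hrtick_enabled;
        bool queued_tick;       /* this tick came from the hrtick path */
        long long runtime_left;
        const void *task;
        const void *head;       /* current earliest-deadline task on the rq */
};

static bool should_rearm_hrtick(const struct tick_ctx *c)
{
        /*
         * Even with runtime left, the accounting done just before this check
         * may have left the task behind another deadline; then the reschedule
         * path, not a fresh hrtick, decides what runs next.
         */
        return c->hrtick_enabled && c->queued_tick &&
               c->runtime_left > 0 && c->task == c->head;
}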
@@ -1166,9 +1187,6 @@ static int find_later_rq(struct task_struct *task)
          * We have to consider system topology and task affinity
          * first, then we can look for a suitable cpu.
          */
-        cpumask_copy(later_mask, task_rq(task)->rd->span);
-        cpumask_and(later_mask, later_mask, cpu_active_mask);
-        cpumask_and(later_mask, later_mask, &task->cpus_allowed);
         best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                         task, later_mask);
         if (best_cpu == -1)
@@ -1563,6 +1581,7 @@ static void rq_online_dl(struct rq *rq)
         if (rq->dl.overloaded)
                 dl_set_overload(rq);
 
+        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
         if (rq->dl.dl_nr_running > 0)
                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
 }
@@ -1574,6 +1593,7 @@ static void rq_offline_dl(struct rq *rq)
 
         dl_clear_overload(rq);
 
         cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
 void init_sched_dl_class(void)
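For illustration only: the two hunks above, together with the removed cpumask fiddling in find_later_rq(), suggest that the root domain's cpudl structure now keeps a "free CPUs" set (CPUs with no deadline tasks), maintained across rq online/offline. The cpudeadline.c side is not part of this diff, so the sketch below is an assumption about the shape of that bookkeeping, not the kernel's implementation; in the real series cpudl_set() presumably also drops a CPU from the free set once a deadline task lands there.

#include <stdint.h>

struct freecpu_map {
        uint64_t mask;                  /* bit n set => CPU n has no DL tasks */
};

static void freecpu_online(struct freecpu_map *m, int cpu)
{
        m->mask |= (uint64_t)1 << cpu;          /* rq_online_dl(): CPU starts out free */
}

static void freecpu_offline(struct freecpu_map *m, int cpu)
{
        m->mask &= ~((uint64_t)1 << cpu);       /* rq_offline_dl(): stop offering it */
}

/*
 * A push target can be picked straight from the mask intersected with the
 * task's affinity, which is why find_later_rq() no longer rebuilds
 * later_mask from rd->span by hand.
 */
static int freecpu_pick(const struct freecpu_map *m, uint64_t allowed)
{
        uint64_t cand = m->mask & allowed;
        int cpu;

        for (cpu = 0; cpu < 64; cpu++)
                if (cand & ((uint64_t)1 << cpu))
                        return cpu;
        return -1;      /* no free CPU among the allowed ones */
}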
