Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	79
1 file changed, 34 insertions, 45 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..a027799ae130 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 		dl_se->runtime = pi_se->dl_runtime;
 	}
+
+	if (dl_se->dl_yielded)
+		dl_se->dl_yielded = 0;
+	if (dl_se->dl_throttled)
+		dl_se->dl_throttled = 0;
 }
 
 /*
@@ -536,23 +541,19 @@ again:
 
 	sched_clock_tick();
 	update_rq_clock(rq);
-	dl_se->dl_throttled = 0;
-	dl_se->dl_yielded = 0;
-	if (task_on_rq_queued(p)) {
-		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-		if (dl_task(rq->curr))
-			check_preempt_curr_dl(rq, p, 0);
-		else
-			resched_curr(rq);
+	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+	if (dl_task(rq->curr))
+		check_preempt_curr_dl(rq, p, 0);
+	else
+		resched_curr(rq);
 #ifdef CONFIG_SMP
-		/*
-		 * Queueing this task back might have overloaded rq,
-		 * check if we need to kick someone away.
-		 */
-		if (has_pushable_dl_tasks(rq))
-			push_dl_task(rq);
+	/*
+	 * Queueing this task back might have overloaded rq,
+	 * check if we need to kick someone away.
+	 */
+	if (has_pushable_dl_tasks(rq))
+		push_dl_task(rq);
 #endif
-	}
 unlock:
 	raw_spin_unlock(&rq->lock);
 
@@ -570,24 +571,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-	int rorun = dl_se->runtime <= 0;
-
-	if (!rorun && !dmiss)
-		return 0;
-
-	/*
-	 * If we are beyond our current deadline and we are still
-	 * executing, then we have already used some of the runtime of
-	 * the next instance. Thus, if we do not account that, we are
-	 * stealing bandwidth from the system at each deadline miss!
-	 */
-	if (dmiss) {
-		dl_se->runtime = rorun ? dl_se->runtime : 0;
-		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-	}
-
-	return 1;
+	return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -630,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
 
 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 	if (dl_runtime_exceeded(rq, dl_se)) {
+		dl_se->dl_throttled = 1;
 		__dequeue_task_dl(rq, curr, 0);
-		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
-			dl_se->dl_throttled = 1;
-		else
+		if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
 		if (!is_leftmost(curr, &rq->dl))
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-		replenish_dl_entity(dl_se, pi_se);
-	else
+	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
+	else if (flags & ENQUEUE_REPLENISH)
+		replenish_dl_entity(dl_se, pi_se);
 
 	__enqueue_dl_entity(dl_se);
 }
@@ -870,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	 * its rq, the bandwidth timer callback (which clearly has not
 	 * run yet) will take care of this.
 	 */
-	if (p->dl.dl_throttled)
+	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 		return;
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
@@ -1090,7 +1073,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
 	update_curr_dl(rq);
 
-	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+	/*
+	 * Even when we have runtime, update_curr_dl() might have resulted in us
+	 * not being the leftmost task anymore. In that case NEED_RESCHED will
+	 * be set and schedule() will start a new hrtick for the next task.
+	 */
+	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+	    is_leftmost(p, &rq->dl))
 		start_hrtick_dl(rq, p);
 }
 
@@ -1111,6 +1100,7 @@ static void task_dead_dl(struct task_struct *p)
 	 * Since we are TASK_DEAD we won't slip out of the domain!
 	 */
 	raw_spin_lock_irq(&dl_b->lock);
+	/* XXX we should retain the bw until 0-lag */
 	dl_b->total_bw -= p->dl.dl_bw;
 	raw_spin_unlock_irq(&dl_b->lock);
 
@@ -1182,9 +1172,6 @@ static int find_later_rq(struct task_struct *task)
 	 * We have to consider system topology and task affinity
 	 * first, then we can look for a suitable cpu.
 	 */
-	cpumask_copy(later_mask, task_rq(task)->rd->span);
-	cpumask_and(later_mask, later_mask, cpu_active_mask);
-	cpumask_and(later_mask, later_mask, &task->cpus_allowed);
 	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
 			task, later_mask);
 	if (best_cpu == -1)
@@ -1579,6 +1566,7 @@ static void rq_online_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_set_overload(rq);
 
+	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 	if (rq->dl.dl_nr_running > 0)
 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
 }
@@ -1590,6 +1578,7 @@ static void rq_offline_dl(struct rq *rq)
 		dl_clear_overload(rq);
 
 	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
 void init_sched_dl_class(void)
@@ -1631,8 +1620,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
+	/* XXX we should retain the bw until 0-lag */
 	cancel_dl_timer(rq, p);
-
 	__dl_clear_params(p);
 
 	/*
