Diffstat (limited to 'kernel/sched/deadline.c')
 -rw-r--r--  kernel/sched/deadline.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0c6b1d089cd4..ee25361becdd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -63,10 +63,10 @@ void init_dl_bw(struct dl_bw *dl_b)
 {
 	raw_spin_lock_init(&dl_b->lock);
 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
-	if (global_dl_runtime() == RUNTIME_INF)
+	if (global_rt_runtime() == RUNTIME_INF)
 		dl_b->bw = -1;
 	else
-		dl_b->bw = to_ratio(global_dl_period(), global_dl_runtime());
+		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 	dl_b->total_bw = 0;
 }
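For context on the hunk above: dl_b->bw stores the runtime/period fraction as a 20-bit fixed-point ratio, now derived from the global rt knobs instead of separate dl ones. A minimal standalone sketch of that conversion, modeled on the kernel's to_ratio() helper (the demo values are illustrative, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20	/* fixed-point shift used for bandwidth ratios */

/* runtime/period expressed as a fixed-point fraction of one CPU */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)	/* avoid division by zero; zero bw is safe */
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* the default rt bandwidth: 950000us runtime per 1000000us period */
	uint64_t bw = to_ratio(1000000, 950000);

	printf("bw = %llu (~%.2f of one CPU)\n",
	       (unsigned long long)bw, (double)bw / (1 << BW_SHIFT));
	return 0;
}

(The RUNTIME_INF case is handled before the call in init_dl_bw(), which is why the sketch omits it.)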
@@ -612,6 +612,29 @@ static void update_curr_dl(struct rq *rq)
 		if (!is_leftmost(curr, &rq->dl))
 			resched_task(curr);
 	}
+
+	/*
+	 * Because -- for now -- we share the rt bandwidth, we need to
+	 * account our runtime there too, otherwise actual rt tasks
+	 * would be able to exceed the shared quota.
+	 *
+	 * Account to the root rt group for now.
+	 *
+	 * The solution we're working towards is having the RT groups scheduled
+	 * using deadline servers -- however there's a few nasties to figure
+	 * out before that can happen.
+	 */
+	if (rt_bandwidth_enabled()) {
+		struct rt_rq *rt_rq = &rq->rt;
+
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_time += delta_exec;
+		/*
+		 * We'll let actual RT tasks worry about the overflow here, we
+		 * have our own CBS to keep us inline -- see above.
+		 */
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+	}
 }
 
 #ifdef CONFIG_SMP
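The second hunk is the behavioral change: deadline runtime is charged to the root rt_rq, so the rt throttling code compares the combined rt + deadline consumption against the single shared quota, while the deadline class itself relies on its own CBS for overrun control. A toy model of that accounting (not kernel code; all toy_* names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the shared-quota idea above: both rt and deadline
 * execution time is charged to a single rt_time counter, and only the
 * rt side acts on overflow -- deadline relies on its own CBS.
 */
struct toy_rt_rq {
	uint64_t rt_time;	/* runtime consumed this period, ns */
	uint64_t rt_runtime;	/* quota per period, ns */
};

/* called from both classes' update_curr paths in this model */
static void toy_account(struct toy_rt_rq *rt_rq, uint64_t delta_exec)
{
	rt_rq->rt_time += delta_exec;
}

/* only the rt class checks this; deadline's CBS keeps it in line */
static bool toy_rt_throttled(const struct toy_rt_rq *rt_rq)
{
	return rt_rq->rt_time > rt_rq->rt_runtime;
}

int main(void)
{
	struct toy_rt_rq rt_rq = { .rt_time = 0, .rt_runtime = 950000000 };

	toy_account(&rt_rq, 600000000);	/* 600ms of deadline runtime */
	toy_account(&rt_rq, 400000000);	/* 400ms of rt runtime */
	printf("throttled: %d\n", toy_rt_throttled(&rt_rq));	/* 1: quota blown */
	return 0;
}

Without the charge from the deadline side, the second task in this model would see an empty counter and rt tasks could exceed the shared quota, which is exactly what the comment in the hunk warns about.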