Diffstat (limited to 'kernel/sched/deadline.c')
 kernel/sched/deadline.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 15cbc17fbf84..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
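
Context note: the extern declaration added above refers to a helper that would live in kernel/sched/rt.c, which is outside this diffstat (limited to kernel/sched/deadline.c), so its definition is not shown here. Below is a minimal sketch of what such a helper plausibly looks like, assuming the usual rt_bandwidth infrastructure from kernel/sched/sched.h (sched_rt_bandwidth(), rt_period_timer, rt_runtime); treat the body as an illustration of the intent "only account when RT bandwidth is relevant", not as the exact code of this change.

/*
 * Sketch (assumed, not part of this diff): report whether RT bandwidth
 * accounting is currently meaningful for this rt_rq. Accounting -deadline
 * runtime into rt_time only while the RT period timer is active or the
 * runqueue is still below its RT runtime budget keeps rt_time from growing
 * without bound when RT throttling is not being enforced.
 */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

With a guard of this shape, update_curr_dl() still takes rt_runtime_lock but skips the rt_time += delta_exec accumulation whenever RT bandwidth is not relevant, which is the behaviour the updated comment in the hunk describes.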