Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
 
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -135,9 +135,7 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total++;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
 
@@ -147,9 +145,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total--;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
 
@@ -566,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -629,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
@@ -717,6 +717,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
+	inc_nr_running(rq_of_dl_rq(dl_rq));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +731,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
+	dec_nr_running(rq_of_dl_rq(dl_rq));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +838,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +850,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
-
-	dec_nr_running(rq);
 }
 
 /*
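
The update_curr_dl() hunk above changes the rt_time accounting: a -deadline task's runtime is charged against the shared RT bandwidth, but when no RT bandwidth enforcement is active nothing ever decays rt_time, so charging it unconditionally lets the value grow without bound. The standalone sketch below models that guard; the struct layout and the bandwidth_active flag are simplified stand-ins for illustration, not the kernel's actual sched_rt_bandwidth_account() logic.

/*
 * Standalone sketch (not kernel code): why update_curr_dl() now checks
 * sched_rt_bandwidth_account() before charging rt_time. If bandwidth
 * enforcement is inactive, nothing periodically decays rt_time, so
 * charging it would let the value grow forever.
 */
#include <stdbool.h>
#include <stdio.h>

struct rt_rq {
	unsigned long long rt_time;	/* runtime consumed this period */
	bool bandwidth_active;		/* stand-in: will rt_time ever be decayed? */
};

/* Simplified stand-in for sched_rt_bandwidth_account(). */
static bool rt_bandwidth_account(const struct rt_rq *rt_rq)
{
	return rt_rq->bandwidth_active;
}

static void charge_dl_runtime(struct rt_rq *rt_rq, unsigned long long delta_exec)
{
	/* Only account when the period timer will eventually decay rt_time. */
	if (rt_bandwidth_account(rt_rq))
		rt_rq->rt_time += delta_exec;
}

int main(void)
{
	struct rt_rq off = { 0, false }, on = { 0, true };

	for (int i = 0; i < 1000; i++) {
		charge_dl_runtime(&off, 1000);	/* never charged: would never decay */
		charge_dl_runtime(&on, 1000);	/* charged: enforcement decays it */
	}
	printf("off=%llu on=%llu\n", off.rt_time, on.rt_time);
	return 0;
}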
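
The remaining hunks move the rq-wide counter update out of enqueue_task_dl()/dequeue_task_dl() and into inc_dl_tasks()/dec_dl_tasks(), the same place where dl_nr_running changes. A minimal standalone model of why that pairing matters follows; the types are simplified for illustration and are not the kernel's.

/*
 * Standalone sketch (not kernel code): updating both counters at the
 * single site where dl_nr_running changes means they cannot drift
 * apart, regardless of which path enqueues or dequeues the task.
 */
#include <assert.h>
#include <stdio.h>

struct dl_rq { unsigned int dl_nr_running; };
struct rq    { unsigned int nr_running; struct dl_rq dl; };

static void inc_dl_tasks(struct rq *rq)
{
	rq->dl.dl_nr_running++;
	rq->nr_running++;	/* previously done separately, in enqueue_task_dl() */
}

static void dec_dl_tasks(struct rq *rq)
{
	assert(rq->dl.dl_nr_running > 0);
	rq->dl.dl_nr_running--;
	rq->nr_running--;	/* previously done separately, in dequeue_task_dl() */
}

int main(void)
{
	struct rq rq = { 0, { 0 } };

	inc_dl_tasks(&rq);
	inc_dl_tasks(&rq);
	dec_dl_tasks(&rq);

	/* The invariant the move enforces: the counters stay in sync. */
	assert(rq.nr_running == rq.dl.dl_nr_running);
	printf("nr_running=%u dl_nr_running=%u\n",
	       rq.nr_running, rq.dl.dl_nr_running);
	return 0;
}

With a single accounting site, every path that touches dl_nr_running also fixes up rq->nr_running, instead of relying on each enqueue/dequeue caller to remember the second update.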