Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c  10
1 file changed, 3 insertions, 7 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..15cbc17fbf84 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
 
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -137,7 +137,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total++;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
 
@@ -149,7 +148,6 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total--;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
 
@@ -717,6 +715,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
+	inc_nr_running(rq_of_dl_rq(dl_rq));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +729,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
+	dec_nr_running(rq_of_dl_rq(dl_rq));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +836,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +848,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
-
-	dec_nr_running(rq);
 }
 
 /*
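For context, below is a minimal user-space sketch (not kernel code; the struct layouts and helper signatures are simplified stand-ins) of the accounting scheme this diff converges on: rq->nr_running is adjusted inside inc_dl_tasks()/dec_dl_tasks() rather than in enqueue_task_dl()/dequeue_task_dl(), and the overload check reuses dl_nr_running, which is what makes the separate dl_nr_total counter unnecessary.

/*
 * Illustrative sketch only: simplified dl_rq/rq counters modelling the
 * accounting after this patch. All types and helpers here are stand-ins,
 * not the kernel's real definitions.
 */
#include <stdio.h>

struct dl_rq {
	unsigned int dl_nr_running;   /* queued SCHED_DEADLINE tasks */
	unsigned int dl_nr_migratory; /* of those, tasks allowed on >1 CPU */
	int overloaded;
};

struct rq {
	unsigned int nr_running;      /* all runnable tasks on this CPU */
	struct dl_rq dl;
};

/* With dl_nr_total gone, the overload test relies on dl_nr_running. */
static void update_dl_migration(struct rq *rq)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1)
		dl_rq->overloaded = 1;
	else
		dl_rq->overloaded = 0;
}

/*
 * Accounting lives in one place: bumping dl_nr_running also bumps the
 * per-rq nr_running, mirroring inc_dl_tasks()/inc_nr_running() above.
 */
static void inc_dl_tasks(struct rq *rq, int nr_cpus_allowed)
{
	rq->dl.dl_nr_running++;
	rq->nr_running++;
	if (nr_cpus_allowed > 1)
		rq->dl.dl_nr_migratory++;
	update_dl_migration(rq);
}

static void dec_dl_tasks(struct rq *rq, int nr_cpus_allowed)
{
	rq->dl.dl_nr_running--;
	rq->nr_running--;
	if (nr_cpus_allowed > 1)
		rq->dl.dl_nr_migratory--;
	update_dl_migration(rq);
}

int main(void)
{
	struct rq rq = { 0 };

	inc_dl_tasks(&rq, 4);	/* first migratory deadline task */
	inc_dl_tasks(&rq, 4);	/* second one: rq becomes overloaded */
	printf("nr_running=%u dl_nr_running=%u overloaded=%d\n",
	       rq.nr_running, rq.dl.dl_nr_running, rq.dl.overloaded);

	dec_dl_tasks(&rq, 4);
	printf("nr_running=%u dl_nr_running=%u overloaded=%d\n",
	       rq.nr_running, rq.dl.dl_nr_running, rq.dl.overloaded);
	return 0;
}

Running the sketch reports overloaded=1 only while more than one migratory deadline task is queued, which is the condition the patched update_dl_migration() now derives from dl_nr_running alone.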