Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c  66
1 file changed, 47 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 15cbc17fbf84..27ef40925525 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory--;
@@ -212,6 +210,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
 
 static int push_dl_task(struct rq *rq);
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+        return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+        rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
 #else
 
 static inline
@@ -234,6 +242,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 }
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+        return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+        return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
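
The two hunks above exist so that the rest of deadline.c can call need_pull_dl_task(), pull_dl_task() and set_post_schedule() unconditionally: on CONFIG_SMP the helpers do real work, on UP builds they collapse to no-ops, and no call site needs its own #ifdef. A minimal stand-alone sketch of that stub pattern, using toy struct definitions rather than the kernel's, is:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures; illustration only. */
struct rq { int nr_pushable; int post_schedule; };
struct task_struct { int is_dl; };

#ifdef CONFIG_SMP
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return prev->is_dl;             /* pull only if prev was a DL task */
}
static inline void set_post_schedule(struct rq *rq)
{
        rq->post_schedule = rq->nr_pushable > 0;
}
#else
/* UP build: the helpers collapse to no-ops, so callers need no #ifdef. */
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}
static inline void set_post_schedule(struct rq *rq) { }
#endif

int main(void)
{
        struct rq rq = { .nr_pushable = 1 };
        struct task_struct prev = { .is_dl = 1 };

        printf("need pull: %d\n", need_pull_dl_task(&rq, &prev));
        set_post_schedule(&rq);
        printf("post_schedule: %d\n", rq.post_schedule);
        return 0;
}

Build with or without -DCONFIG_SMP to see the same call sites resolve to either the real helpers or the no-op stubs.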
@@ -564,6 +585,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
         return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -586,8 +609,8 @@ static void update_curr_dl(struct rq *rq)
          * approach need further study.
          */
         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
-        if (unlikely((s64)delta_exec < 0))
-                delta_exec = 0;
+        if (unlikely((s64)delta_exec <= 0))
+                return;
 
         schedstat_set(curr->se.statistics.exec_max,
                       max(curr->se.statistics.exec_max, delta_exec));
@@ -627,11 +650,13 @@ static void update_curr_dl(struct rq *rq)
                 struct rt_rq *rt_rq = &rq->rt;
 
                 raw_spin_lock(&rt_rq->rt_runtime_lock);
-                rt_rq->rt_time += delta_exec;
                 /*
                  * We'll let actual RT tasks worry about the overflow here, we
-                 * have our own CBS to keep us inline -- see above.
+                 * have our own CBS to keep us inline; only account when RT
+                 * bandwidth is relevant.
                  */
+                if (sched_rt_bandwidth_account(rt_rq))
+                        rt_rq->rt_time += delta_exec;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
 }
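
Taken together, the update_curr_dl() hunks above change two things: a non-positive delta_exec now aborts the update instead of being clamped to zero, and the consumed runtime is charged to the shared RT bandwidth only when sched_rt_bandwidth_account() reports that RT bandwidth enforcement is active, which is what keeps rt_time from growing without bound. The following user-space model of that accounting decision uses toy structures and a stubbed sched_rt_bandwidth_account(), so it is an illustration of the logic rather than the kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

/* Toy models of the run-queue state touched by update_curr_dl(). */
struct rt_rq { s64 rt_time; bool bandwidth_enforced; };
struct dl_se { s64 runtime; };

/* Stand-in for sched_rt_bandwidth_account(): account into rt_time
 * only while RT bandwidth enforcement is actually active. */
static bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
        return rt_rq->bandwidth_enforced;
}

static void update_curr_dl_model(struct dl_se *dl, struct rt_rq *rt_rq, s64 delta_exec)
{
        if (delta_exec <= 0)            /* clock went backwards or no progress: bail out */
                return;

        dl->runtime -= delta_exec;      /* charge the DL entity's own budget (CBS) */

        if (sched_rt_bandwidth_account(rt_rq))
                rt_rq->rt_time += delta_exec;   /* ...and the shared RT bandwidth */
}

int main(void)
{
        struct rt_rq rt = { .rt_time = 0, .bandwidth_enforced = false };
        struct dl_se dl = { .runtime = 1000000 };

        update_curr_dl_model(&dl, &rt, 250000);
        printf("runtime=%lld rt_time=%lld\n", (long long)dl.runtime, (long long)rt.rt_time);

        rt.bandwidth_enforced = true;
        update_curr_dl_model(&dl, &rt, 250000);
        printf("runtime=%lld rt_time=%lld\n", (long long)dl.runtime, (long long)rt.rt_time);
        return 0;
}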
@@ -940,6 +965,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
         resched_task(rq->curr);
 }
 
+static int pull_dl_task(struct rq *this_rq);
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -986,7 +1013,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
         return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq)
+struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 {
         struct sched_dl_entity *dl_se;
         struct task_struct *p;
@@ -994,9 +1021,20 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
 
         dl_rq = &rq->dl;
 
+        if (need_pull_dl_task(rq, prev))
+                pull_dl_task(rq);
+        /*
+         * When prev is DL, we may throttle it in put_prev_task().
+         * So, we update time before we check for dl_nr_running.
+         */
+        if (prev->sched_class == &dl_sched_class)
+                update_curr_dl(rq);
+
         if (unlikely(!dl_rq->dl_nr_running))
                 return NULL;
 
+        put_prev_task(rq, prev);
+
         dl_se = pick_next_dl_entity(rq, dl_rq);
         BUG_ON(!dl_se);
 
@@ -1011,9 +1049,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
                 start_hrtick_dl(rq, p);
 #endif
 
-#ifdef CONFIG_SMP
-        rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+        set_post_schedule(rq);
 
         return p;
 }
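
With these hunks, pick_next_task_dl() receives prev and takes over work that used to live in the pre_schedule hook and the core scheduler: it pulls DL tasks first if prev was a DL task, updates prev's runtime before testing dl_nr_running (the update may throttle and dequeue prev), only then calls put_prev_task(), and finally records whether a post-schedule push is needed. The outline below is a compilable model of that ordering; the helper bodies are stubs and the field names are placeholders, not kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task_struct { bool is_dl; const char *name; };
struct rq {
        int dl_nr_running;
        int post_schedule;
        struct task_struct *dl_next;    /* toy: next DL task, if any */
};

/* Stubbed helpers so the ordering below compiles and runs. */
static bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) { return prev->is_dl; }
static void pull_dl_task(struct rq *rq) { printf("pull\n"); }
static void update_curr_dl(struct rq *rq) { printf("update prev runtime\n"); }
static void put_prev_task(struct rq *rq, struct task_struct *prev) { printf("put %s\n", prev->name); }
static void set_post_schedule(struct rq *rq) { rq->post_schedule = 1; }

static struct task_struct *pick_next_task_dl_model(struct rq *rq, struct task_struct *prev)
{
        if (need_pull_dl_task(rq, prev))
                pull_dl_task(rq);       /* 1. pull before deciding there is nothing to run */

        if (prev->is_dl)
                update_curr_dl(rq);     /* 2. may throttle prev, changing dl_nr_running */

        if (!rq->dl_nr_running)
                return NULL;            /* 3. bail out before touching prev's class state */

        put_prev_task(rq, prev);        /* 4. only now hand prev back to its class */
        set_post_schedule(rq);          /* 5. remember whether to push after the switch */
        return rq->dl_next;
}

int main(void)
{
        struct task_struct prev = { .is_dl = true, .name = "prev" };
        struct task_struct next = { .is_dl = true, .name = "next" };
        struct rq rq = { .dl_nr_running = 1, .dl_next = &next };

        struct task_struct *p = pick_next_task_dl_model(&rq, &prev);
        printf("picked: %s, post_schedule=%d\n", p ? p->name : "(none)", rq.post_schedule);
        return 0;
}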
@@ -1422,13 +1458,6 @@ skip:
         return ret;
 }
 
-static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
-{
-        /* Try to pull other tasks here */
-        if (dl_task(prev))
-                pull_dl_task(rq);
-}
-
 static void post_schedule_dl(struct rq *rq)
 {
         push_dl_tasks(rq);
@@ -1556,7 +1585,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
         if (unlikely(p->dl.dl_throttled))
                 return;
 
-        if (p->on_rq || rq->curr != p) {
+        if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
                         /* Only reschedule if pushing failed */
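
The switch from || to && above narrows when switched_to_dl() tries to push or reschedule: the new condition fires only for a task that is queued (p->on_rq) and is not the CPU's current task, while the old form also fired for tasks that were not on any run queue. A tiny truth-table check in plain C shows where the two conditions differ:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /* Enumerate the (on_rq, is_curr) states a task switching to DL can be in. */
        for (int on_rq = 0; on_rq <= 1; on_rq++) {
                for (int is_curr = 0; is_curr <= 1; is_curr++) {
                        bool old_cond = on_rq || !is_curr;   /* p->on_rq || rq->curr != p */
                        bool new_cond = on_rq && !is_curr;   /* p->on_rq && rq->curr != p */
                        printf("on_rq=%d curr=%d  old=%d new=%d\n",
                               on_rq, is_curr, old_cond, new_cond);
                }
        }
        return 0;
}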
@@ -1621,7 +1650,6 @@ const struct sched_class dl_sched_class = {
         .set_cpus_allowed       = set_cpus_allowed_dl,
         .rq_online              = rq_online_dl,
         .rq_offline             = rq_offline_dl,
-        .pre_schedule           = pre_schedule_dl,
         .post_schedule          = post_schedule_dl,
         .task_woken             = task_woken_dl,
 #endif