path: root/kernel/sched/deadline.c
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	77
1 files changed, 66 insertions, 11 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3fa8fa6d9403..5e95145088fd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -69,7 +69,7 @@ void init_dl_bw(struct dl_bw *dl_b)
 	dl_b->total_bw = 0;
 }
 
-void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
+void init_dl_rq(struct dl_rq *dl_rq)
 {
 	dl_rq->rb_root = RB_ROOT;
 
@@ -218,6 +218,52 @@ static inline void set_post_schedule(struct rq *rq)
 	rq->post_schedule = has_pushable_dl_tasks(rq);
 }
 
+static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
+
+static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
+{
+	struct rq *later_rq = NULL;
+	bool fallback = false;
+
+	later_rq = find_lock_later_rq(p, rq);
+
+	if (!later_rq) {
+		int cpu;
+
+		/*
+		 * If we cannot preempt any rq, fall back to pick any
+		 * online cpu.
+		 */
+		fallback = true;
+		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
+		if (cpu >= nr_cpu_ids) {
+			/*
+			 * Fail to find any suitable cpu.
+			 * The task will never come back!
+			 */
+			BUG_ON(dl_bandwidth_enabled());
+
+			/*
+			 * If admission control is disabled we
+			 * try a little harder to let the task
+			 * run.
+			 */
+			cpu = cpumask_any(cpu_active_mask);
+		}
+		later_rq = cpu_rq(cpu);
+		double_lock_balance(rq, later_rq);
+	}
+
+	deactivate_task(rq, p, 0);
+	set_task_cpu(p, later_rq->cpu);
+	activate_task(later_rq, p, ENQUEUE_REPLENISH);
+
+	if (!fallback)
+		resched_curr(later_rq);
+
+	double_unlock_balance(rq, later_rq);
+}
+
 #else
 
 static inline
@@ -514,7 +560,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	unsigned long flags;
 	struct rq *rq;
 
-	rq = task_rq_lock(current, &flags);
+	rq = task_rq_lock(p, &flags);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -536,6 +582,17 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	sched_clock_tick();
 	update_rq_clock(rq);
 
+#ifdef CONFIG_SMP
+	/*
+	 * If we find that the rq the task was on is no longer
+	 * available, we need to select a new rq.
+	 */
+	if (unlikely(!rq->online)) {
+		dl_task_offline_migration(rq, p);
+		goto unlock;
+	}
+#endif
+
 	/*
 	 * If the throttle happened during sched-out; like:
 	 *
@@ -569,7 +626,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	push_dl_task(rq);
 #endif
 unlock:
-	task_rq_unlock(rq, current, &flags);
+	task_rq_unlock(rq, p, &flags);
 
 	return HRTIMER_NORESTART;
 }
@@ -914,6 +971,12 @@ static void yield_task_dl(struct rq *rq)
 	}
 	update_rq_clock(rq);
 	update_curr_dl(rq);
+	/*
+	 * Tell update_rq_clock() that we've just updated,
+	 * so we don't do microscopic update in schedule()
+	 * and double the fastpath cost.
+	 */
+	rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
@@ -1659,14 +1722,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
 	int check_resched = 1;
 
-	/*
-	 * If p is throttled, don't consider the possibility
-	 * of preempting rq->curr, the check will be done right
-	 * after its runtime will get replenished.
-	 */
-	if (unlikely(p->dl.dl_throttled))
-		return;
-
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&