Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	83
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1ce8867283dc..37e2449186c4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -243,10 +243,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
 	struct rq *later_rq = NULL;
-	bool fallback = false;
 
 	later_rq = find_lock_later_rq(p, rq);
-
 	if (!later_rq) {
 		int cpu;
 
@@ -254,7 +252,6 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
-		fallback = true;
		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
		if (cpu >= nr_cpu_ids) {
			/*
@@ -274,16 +271,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 		double_lock_balance(rq, later_rq);
 	}
 
-	/*
-	 * By now the task is replenished and enqueued; migrate it.
-	 */
-	deactivate_task(rq, p, 0);
 	set_task_cpu(p, later_rq->cpu);
-	activate_task(later_rq, p, 0);
-
-	if (!fallback)
-		resched_curr(later_rq);
-
 	double_unlock_balance(later_rq, rq);
 
 	return later_rq;
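
With the fallback flag and the deactivate/activate pair gone, dl_task_offline_migration() now only picks a destination runqueue and switches the task's CPU; enqueueing and rescheduling are left to the caller. A simplified sketch of the caller-side ordering, based on the dl_task_timer() hunk further down (which also shows the lockdep pin/unpin handling omitted here):

	if (unlikely(!rq->online))
		rq = dl_task_offline_migration(rq, p);	/* returns the new rq, locked */

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);	/* activation now happens here */
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);
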
@@ -346,12 +334,12 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
  * one, and to (try to!) reconcile itself with its own scheduling
  * parameters.
  */
-static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
-				       struct sched_dl_entity *pi_se)
+static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 {
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
+	WARN_ON(dl_se->dl_boosted);
 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 
 	/*
@@ -367,8 +355,8 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 	 * future; in fact, we must consider execution overheads (time
 	 * spent on hardirq context, etc.).
 	 */
-	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
-	dl_se->runtime = pi_se->dl_runtime;
+	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
+	dl_se->runtime = dl_se->dl_runtime;
 }
 
 /*
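
In other words, the entity now replenishes from its own dl_runtime/dl_deadline rather than from a donor pi_se, and the new WARN_ON makes it explicit that boosted entities must never get here. Purely illustrative numbers (not part of the patch): with dl_runtime = 10 ms and dl_deadline = 30 ms, a call at rq_clock(rq) == t leaves the entity with

	dl_se->deadline = t + 30 * NSEC_PER_MSEC;	/* rq_clock(rq) + dl_se->dl_deadline */
	dl_se->runtime  = 10 * NSEC_PER_MSEC;		/* a full dl_se->dl_runtime budget */
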
@@ -641,29 +629,31 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		goto unlock;
 	}
 
-	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-	if (dl_task(rq->curr))
-		check_preempt_curr_dl(rq, p, 0);
-	else
-		resched_curr(rq);
-
 #ifdef CONFIG_SMP
-	/*
-	 * Perform balancing operations here; after the replenishments. We
-	 * cannot drop rq->lock before this, otherwise the assertion in
-	 * start_dl_timer() about not missing updates is not true.
-	 *
-	 * If we find that the rq the task was on is no longer available, we
-	 * need to select a new rq.
-	 *
-	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
-	 */
 	if (unlikely(!rq->online)) {
+		/*
+		 * If the runqueue is no longer available, migrate the
+		 * task elsewhere. This necessarily changes rq.
+		 */
 		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = dl_task_offline_migration(rq, p);
 		rf.cookie = lockdep_pin_lock(&rq->lock);
+
+		/*
+		 * Now that the task has been migrated to the new RQ and we
+		 * have that locked, proceed as normal and enqueue the task
+		 * there.
+		 */
 	}
+#endif
+
+	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+	if (dl_task(rq->curr))
+		check_preempt_curr_dl(rq, p, 0);
+	else
+		resched_curr(rq);
 
+#ifdef CONFIG_SMP
 	/*
 	 * Queueing this task back might have overloaded rq, check if we need
 	 * to kick someone away.
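
The lockdep choreography around the migration call is the pattern to keep in mind here: dl_task_offline_migration() may return with a different rq->lock held, so the pin on the old lock is dropped first and a fresh cookie is taken on whatever lock comes back. Annotated, the three lines above read:

	lockdep_unpin_lock(&rq->lock, rf.cookie);	/* we may hand this lock over */
	rq = dl_task_offline_migration(rq, p);		/* may return a different rq, locked */
	rf.cookie = lockdep_pin_lock(&rq->lock);	/* re-pin whichever lock we now hold */
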
@@ -735,9 +725,8 @@ static void update_curr_dl(struct rq *rq)
 		return;
 	}
 
-	/* kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
+	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
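
The open-coded smp_processor_id() check moves into the helper, and the SCHED_CPUFREQ_DL flag tells the cpufreq hook that a deadline task is driving the request. The likely shape of cpufreq_update_this_cpu() in kernel/sched/sched.h, inferred from the code it replaces rather than quoted from that header, is roughly:

	static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
	{
		/* only notify the cpufreq hook for the runqueue of the local CPU */
		if (cpu_of(rq) == smp_processor_id())
			cpufreq_update_util(rq, flags);
	}
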
@@ -798,7 +787,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 		dl_rq->earliest_dl.curr = deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 	}
 }
 
@@ -813,14 +802,14 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (!dl_rq->dl_nr_running) {
 		dl_rq->earliest_dl.curr = 0;
 		dl_rq->earliest_dl.next = 0;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	} else {
 		struct rb_node *leftmost = dl_rq->rb_leftmost;
 		struct sched_dl_entity *entry;
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 	}
 }
 
@@ -1671,7 +1660,7 @@ static void rq_online_dl(struct rq *rq)
 
 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 	if (rq->dl.dl_nr_running > 0)
-		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1680,7 +1669,7 @@ static void rq_offline_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
 
-	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
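
These four call sites reflect a split of the old cpudl_set(cp, cpu, dl, is_valid) interface: publishing a CPU's earliest deadline and invalidating its entry in the cpudl heap are now distinct operations. The prototypes implied by the call sites (presumably declared in kernel/sched/cpudeadline.h) are roughly:

	void cpudl_set(struct cpudl *cp, int cpu, u64 dl);	/* cpu now runs a DL task with this earliest deadline */
	void cpudl_clear(struct cpudl *cp, int cpu);		/* cpu no longer has any queued DL task */
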
@@ -1723,10 +1712,20 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+
+	/* If p is not queued we will update its parameters at next wakeup. */
+	if (!task_on_rq_queued(p))
+		return;
+
+	/*
+	 * If p is boosted we already updated its params in
+	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
+	 * p's deadline being now already after rq_clock(rq).
+	 */
 	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
-		setup_new_dl_entity(&p->dl, &p->dl);
+		setup_new_dl_entity(&p->dl);
 
-	if (task_on_rq_queued(p) && rq->curr != p) {
+	if (rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
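
Taken together with the setup_new_dl_entity() change above: a task that is not queued now just returns (its parameters get refreshed at the next wakeup), and a boosted task can never trip the new WARN_ON(dl_se->dl_boosted), because rt_mutex_setprio() has already replenished it and its deadline is therefore after rq_clock(rq), failing the dl_time_before() test. A condensed view of the new flow (illustrative, not the patch text):

	if (!task_on_rq_queued(p))
		return;					/* fix up parameters at next wakeup */
	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
		setup_new_dl_entity(&p->dl);		/* stale deadline: start a fresh period */
	if (rq->curr != p)
		/* push to another CPU or preempt the current task */
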