Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	26
1 file changed, 22 insertions, 4 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a0d1c1..f4d4b077eba0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 				enqueue = 1;
 
 				/*
-				 * Force a clock update if the CPU was idle,
-				 * lest wakeup -> unthrottle time accumulate.
+				 * When we're idle and a woken (rt) task is
+				 * throttled check_preempt_curr() will set
+				 * skip_update and the time between the wakeup
+				 * and this unthrottle will get accounted as
+				 * 'runtime'.
 				 */
 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq->skip_clock_update = -1;
+					rq_clock_skip_update(rq, false);
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
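
The hunk above swaps the open-coded rq->skip_clock_update assignment for the rq_clock_skip_update() helper; per the new comment, passing false cancels a clock-update skip requested at wakeup, so the idle time between the wakeup and this unthrottle is not accounted as 'runtime'. Below is a minimal standalone sketch of that one-shot skip flag, assuming the helper simply sets or clears a per-rq request flag that the next clock update honours once; toy_rq, rq_clock_skip_update_model() and update_rq_clock_model() are made-up names for illustration, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy runqueue; the real struct rq is far larger. */
struct toy_rq {
	unsigned long long clock;	/* last sampled timestamp */
	bool skip_clock_update;		/* one-shot "skip next update" request */
};

/* Models rq_clock_skip_update(rq, skip): skip == false cancels the request. */
static void rq_clock_skip_update_model(struct toy_rq *rq, bool skip)
{
	rq->skip_clock_update = skip;
}

/* Models a clock update: honour a pending skip once, otherwise advance. */
static void update_rq_clock_model(struct toy_rq *rq, unsigned long long now)
{
	if (rq->skip_clock_update) {
		rq->skip_clock_update = false;
		return;
	}
	rq->clock = now;
}

int main(void)
{
	struct toy_rq rq = { .clock = 0, .skip_clock_update = false };

	rq_clock_skip_update_model(&rq, true);	/* e.g. requested at wakeup */
	rq_clock_skip_update_model(&rq, false);	/* the unthrottle path cancels it */
	update_rq_clock_model(&rq, 100);	/* so the clock really advances */
	printf("clock = %llu\n", rq.clock);	/* prints "clock = 100" */
	return 0;
}
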
@@ -1337,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
-		if (target != -1)
+		/*
+		 * Don't bother moving it if the destination CPU is
+		 * not running a lower priority task.
+		 */
+		if (target != -1 &&
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
 			cpu = target;
 	}
 	rcu_read_unlock();
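
In this hunk the wakeup path only migrates the task when it would actually be the highest-priority task on the CPU returned by find_lowest_rq(); since a numerically lower ->prio means a higher priority in the kernel, p->prio < highest_prio.curr reads as "p beats everything currently queued there". Here is a minimal standalone sketch of that decision; pick_cpu() and toy_rt_rq are illustrative names, not kernel API.

#include <stdio.h>

/* Lower numeric value == higher priority, as for task_struct::prio. */
struct toy_rt_rq {
	int highest_prio;	/* best (lowest) prio currently on that CPU */
};

/*
 * Models the new check in select_task_rq_rt(): use the CPU proposed by
 * find_lowest_rq() only if the waking task would preempt whatever runs
 * there; otherwise keep the previously chosen CPU.
 */
static int pick_cpu(int task_prio, int fallback_cpu,
		    int target, const struct toy_rt_rq *target_rt)
{
	if (target != -1 && task_prio < target_rt->highest_prio)
		return target;
	return fallback_cpu;
}

int main(void)
{
	struct toy_rt_rq busy = { .highest_prio = 10 };

	printf("%d\n", pick_cpu(5, 0, 1, &busy));	/* 1: prio 5 beats 10 */
	printf("%d\n", pick_cpu(20, 0, 1, &busy));	/* 0: prio 20 loses   */
	printf("%d\n", pick_cpu(5, 0, -1, &busy));	/* 0: no target found */
	return 0;
}
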
@@ -1614,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
 		lowest_rq = cpu_rq(cpu);
 
+		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+			/*
+			 * Target rq has tasks of equal or higher priority,
+			 * retrying does not release any lock and is unlikely
+			 * to yield a different result.
+			 */
+			lowest_rq = NULL;
+			break;
+		}
+
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
 			/*
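
The block added in the last hunk bails out of find_lock_lowest_rq() before taking the second rq lock: as its comment says, when the candidate already runs a task of equal or higher priority, retrying releases no lock and is unlikely to yield a different result. A minimal standalone sketch of that "cheap check before the expensive lock" pattern follows; retry_is_pointless() and toy_rq are illustrative names, not kernel API.

#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	int highest_prio;	/* lower value == higher priority */
};

/*
 * Models the early exit: if the candidate runqueue already holds an
 * equal-or-higher-priority task, taking both rq locks and re-checking
 * cannot make it usable, so give up immediately.
 */
static bool retry_is_pointless(int task_prio, const struct toy_rq *lowest_rq)
{
	return lowest_rq->highest_prio <= task_prio;
}

int main(void)
{
	struct toy_rq candidate = { .highest_prio = 10 };

	/* prio 20 task: candidate is busy with something more important */
	printf("%s\n", retry_is_pointless(20, &candidate) ? "bail out" : "lock it");
	/* prio 5 task: worth taking the locks and pushing the task there */
	printf("%s\n", retry_is_pointless(5, &candidate) ? "bail out" : "lock it");
	return 0;
}
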