Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad6267714c84..db308cb08b75 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
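
Note: the two hunks above make the same fix in sched_rt_rq_enqueue() and sched_rt_rq_dequeue(). smp_processor_id() names the CPU the caller happens to be executing on, whereas cpu_of(rq_of_rt_rq(rt_rq)) names the CPU that owns the runqueue being operated on; the two differ whenever these functions run against a remote CPU's rt_rq, in which case the old code indexed tg->rt_se[] with the wrong CPU. A minimal userspace sketch of that failure mode, using hypothetical stand-in types rather than the kernel's:

	#include <stdio.h>

	#define NR_CPUS 4

	struct rt_rq { int cpu; };		/* the runqueue knows its owner CPU */
	struct group { int se[NR_CPUS]; };	/* per-CPU entities, like tg->rt_se[] */

	/* use_owner = 0 models the old smp_processor_id() indexing,
	 * use_owner = 1 models the new cpu_of(rq_of_rt_rq(rt_rq)) indexing */
	static int entity_for(struct group *g, struct rt_rq *rt_rq,
			      int caller_cpu, int use_owner)
	{
		int cpu = use_owner ? rt_rq->cpu : caller_cpu;
		return g->se[cpu];
	}

	int main(void)
	{
		struct group g = { .se = { 100, 101, 102, 103 } };
		struct rt_rq remote = { .cpu = 2 };	/* runqueue owned by CPU 2 */
		int caller_cpu = 0;			/* but we execute on CPU 0 */

		printf("old: %d\n", entity_for(&g, &remote, caller_cpu, 0)); /* 100: CPU 0's entity, wrong */
		printf("new: %d\n", entity_for(&g, &remote, caller_cpu, 1)); /* 102: the owner's entity */
		return 0;
	}
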
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
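
Note: this hunk touches the branch taken when the group has queued tasks but, judging by the surrounding function, consumed no rt_time this period. Previously the branch only cleared 'idle'; it never set 'enqueue', so the shared 'if (enqueue) sched_rt_rq_enqueue(rt_rq);' below could leave a runnable group's entity off the runqueue. A condensed sketch of the new decision, with the kernel's locking and bandwidth accounting elided and stand-in parameters in place of the kernel's types:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors the changed else-branch: nr_running plays rt_rq->rt_nr_running,
	 * throttled plays rt_rq_throttled(rt_rq). */
	static void period_tick(int nr_running, bool throttled,
				bool *idle, bool *enqueue)
	{
		*idle = true;
		*enqueue = false;
		if (nr_running) {
			*idle = false;
			if (!throttled)		/* new: re-enqueue unthrottled groups */
				*enqueue = true;
		}
	}

	int main(void)
	{
		bool idle, enqueue;
		period_tick(2, false, &idle, &enqueue);
		printf("idle=%d enqueue=%d\n", idle, enqueue);	/* idle=0 enqueue=1 */
		return 0;
	}
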
@@ -1595,8 +1599,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
 	 * If there are other RT tasks then we will reschedule
@@ -1605,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!rq->rt.rt_nr_running)
+	if (p->se.on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1624,8 +1627,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
 	int check_resched = 1;
 
@@ -1636,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (!running) {
+	if (p->se.on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1652,10 +1654,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-			    int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (running) {
+	if (!p->se.on_rq)
+		return;
+
+	if (rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
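
Note: the last five hunks are one interface change: the scheduling-class hooks drop their 'running' argument, and each callback now derives the task's state itself. p->se.on_rq says whether p is queued, and rq->curr == p says whether p is the task currently on the CPU, so switched_to_rt's new guard 'p->se.on_rq && rq->curr != p' means queued but not running, and prio_changed_rt bails out early for tasks that are not queued at all. A stand-alone sketch of the predicates the new code tests, with stand-in struct definitions rather than the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct sched_entity { bool on_rq; };
	struct task_struct { struct sched_entity se; };
	struct rq { struct task_struct *curr; };

	/* prio_changed_rt's 'rq->curr == p' path: queued and executing now */
	static bool task_is_current(struct rq *rq, struct task_struct *p)
	{
		return p->se.on_rq && rq->curr == p;
	}

	/* switched_to_rt's new guard: queued, but some other task is running,
	 * so a push/resched decision is needed rather than preemption handling */
	static bool queued_not_current(struct rq *rq, struct task_struct *p)
	{
		return p->se.on_rq && rq->curr != p;
	}

	int main(void)
	{
		struct task_struct a = { .se = { .on_rq = true } };
		struct task_struct b = { .se = { .on_rq = true } };
		struct rq rq = { .curr = &a };

		printf("a current? %d  b queued-not-current? %d\n",
		       task_is_current(&rq, &a), queued_not_current(&rq, &b));
		return 0;
	}
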