diff options
 kernel/sched_rt.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad6267714c8..01f75a5f17a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
