Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 30 +++++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index db04161fe37..b827550a0d0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -187,11 +187,23 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 
 typedef struct task_group *rt_rq_iter_t;
 
-#define for_each_rt_rq(rt_rq, iter, rq) \
-	for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
-	     (&iter->list != &task_groups) && \
-	     (rt_rq = iter->rt_rq[cpu_of(rq)]); \
-	     iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	do {
+		tg = list_entry_rcu(tg->list.next,
+			typeof(struct task_group), list);
+	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
+
+	if (&tg->list == &task_groups)
+		tg = NULL;
+
+	return tg;
+}
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+	for (iter = container_of(&task_groups, typeof(*iter), list); \
+	     (iter = next_task_group(iter)) && \
+	     (rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
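
The new iterator seeds iter with container_of(&task_groups, ...), a deliberately off-type pointer that is only ever used to read iter->list.next, and then lets next_task_group() both advance the cursor and skip any task group for which task_group_is_autogroup() is true, returning NULL once the walk wraps back to the list head. A minimal user-space sketch of the same pattern, assuming simplified stand-ins (struct tg, tg_list, an autogroup flag) for the kernel's task_group, task_groups, and task_group_is_autogroup():

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tg {
	const char *name;
	int autogroup;		/* stand-in for task_group_is_autogroup() */
	struct list_head list;
};

static struct list_head tg_list = { &tg_list, &tg_list };

/* Same shape as next_task_group(): step first, keep stepping past
 * autogroup entries, and report NULL once the walk reaches the head.
 */
static struct tg *next_tg(struct tg *tg)
{
	do {
		tg = container_of(tg->list.next, struct tg, list);
	} while (&tg->list != &tg_list && tg->autogroup);

	return &tg->list == &tg_list ? NULL : tg;
}

/* Seeding with the list head itself means the first next_tg() call
 * lands on the first real element, with no special case.
 */
#define for_each_tg(iter) \
	for (iter = container_of(&tg_list, struct tg, list); \
	     (iter = next_tg(iter));)

static void tg_add(struct tg *tg)
{
	tg->list.prev = tg_list.prev;
	tg->list.next = &tg_list;
	tg_list.prev->next = &tg->list;
	tg_list.prev = &tg->list;
}

int main(void)
{
	struct tg root = { "root", 0 }, ag = { "autogroup-1", 1 },
		  grp = { "grp", 0 };
	struct tg *iter;

	tg_add(&root);
	tg_add(&ag);
	tg_add(&grp);

	for_each_tg(iter)	/* prints root and grp; autogroup-1 is skipped */
		printf("%s\n", iter->name);

	return 0;
}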
@@ -1045,7 +1057,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
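
With "<" a waking RT task was redirected to another runqueue only when the currently running task was strictly higher priority; with "<=" a tie also triggers the search, the apparent intent being that an equal-priority task cannot preempt a running SCHED_FIFO task and would otherwise just sit on this runqueue. A toy model of the changed predicate; try_other_cpu() and struct toy_task are hypothetical names for illustration, not kernel API (note that a lower prio value means higher priority):

#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	int prio;		/* lower value == higher priority */
	int nr_cpus_allowed;
};

static bool try_other_cpu(const struct toy_task *curr,
			  const struct toy_task *p)
{
	/* Mirrors the updated condition in select_task_rq_rt(): redirect
	 * p when curr is pinned or at least as high priority as p, and
	 * p itself is allowed to run elsewhere.
	 */
	return (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio) &&
	       p->nr_cpus_allowed > 1;
}

int main(void)
{
	struct toy_task curr = { .prio = 50, .nr_cpus_allowed = 4 };
	struct toy_task p    = { .prio = 50, .nr_cpus_allowed = 4 };

	/* Equal priority: the old "<" kept p here to wait behind curr;
	 * "<=" now sends it looking for a CPU where it can run at once.
	 */
	printf("redirect on tie: %s\n", try_other_cpu(&curr, &p) ? "yes" : "no");
	return 0;
}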
@@ -1133,7 +1145,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
 	rt_rq = &rq->rt;
 
-	if (unlikely(!rt_rq->rt_nr_running))
+	if (!rt_rq->rt_nr_running)
 		return NULL;
 
 	if (rt_rq_throttled(rt_rq))
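
unlikely() is the kernel's branch-prediction annotation from include/linux/compiler.h; dropping it here presumably reflects that an empty RT runqueue is a common state for a CPU entering the pick path, not a rare one, so the hint was misleading. For reference, the (simplified) definitions:

/* include/linux/compiler.h (simplified): the hints feed GCC's
 * __builtin_expect() so it can place the expected path on the
 * fall-through side of the branch.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)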
@@ -1555,7 +1567,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
 
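
The dropped rt_task(prev) test was redundant: rq->rt.highest_prio.curr never exceeds MAX_RT_PRIO (it sits at MAX_RT_PRIO when no RT task is queued), while any non-RT task has prio >= MAX_RT_PRIO, so the remaining comparison can only be true when prev is an RT task. A self-contained check of that claim, using the kernel's priority constants (lower value means higher priority):

/* Exhaustively verify: for every possible highest_prio.curr value
 * (0 .. MAX_RT_PRIO) and every non-RT prev->prio (MAX_RT_PRIO ..
 * MAX_PRIO-1), the pull condition "highest > prev" is false, so the
 * rt_task(prev) guard added no information.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_RT_PRIO	100	/* prio 0..99    -> realtime      */
#define MAX_PRIO	140	/* prio 100..139 -> SCHED_NORMAL  */

int main(void)
{
	int highest, prev;

	for (highest = 0; highest <= MAX_RT_PRIO; highest++)
		for (prev = MAX_RT_PRIO; prev < MAX_PRIO; prev++)
			assert(!(highest > prev));

	printf("pull condition never fires for a non-RT prev\n");
	return 0;
}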
@@ -1576,7 +1588,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
 
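The task_woken_rt() hunk applies the same "<" to "<=" relaxation on the push side, so the wakeup-time placement in select_task_rq_rt() and the post-wakeup push decision agree on when an equal-priority task should be moved.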