Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 88725c939e0b..97540f0c9e47 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -185,11 +185,23 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 
 typedef struct task_group *rt_rq_iter_t;
 
-#define for_each_rt_rq(rt_rq, iter, rq) \
-	for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
-	     (&iter->list != &task_groups) && \
-	     (rt_rq = iter->rt_rq[cpu_of(rq)]); \
-	     iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	do {
+		tg = list_entry_rcu(tg->list.next,
+			typeof(struct task_group), list);
+	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
+
+	if (&tg->list == &task_groups)
+		tg = NULL;
+
+	return tg;
+}
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+	for (iter = container_of(&task_groups, typeof(*iter), list); \
+		(iter = next_task_group(iter)) && \
+		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
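
Note: next_task_group() makes the iterator skip autogroup task groups, which own no RT bandwidth of their own, and yields NULL once the walk returns to the task_groups list head, so for_each_rt_rq() terminates cleanly. A minimal usage sketch, assuming a hypothetical debug helper (dump_rt_runtime() is not part of this patch) and that the caller satisfies the usual RCU/rq-lock requirements of the list walk:

/*
 * Hypothetical helper, for illustration only: visit every RT runqueue
 * attached to this CPU's rq (autogroups are skipped by the iterator)
 * and report its allotted runtime.
 */
static void dump_rt_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	for_each_rt_rq(rt_rq, iter, rq)
		printk(KERN_DEBUG "rt_runtime: %lld\n",
		       (long long)rt_rq->rt_runtime);
}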
@@ -1096,7 +1108,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if (p->prio == rq->curr->prio && !need_resched())
+	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
 		check_preempt_equal_prio(rq, p);
 #endif
 }
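
Note: need_resched() tests TIF_NEED_RESCHED on whatever task is current on the local CPU, while the intent here is to ask whether rq->curr has already been told to reschedule; the two differ when this path runs against a remote runqueue, i.e. when current is not rq->curr. Approximate definitions for reference (paraphrased from include/linux/sched.h of this era, not part of the diff):

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int need_resched(void)
{
	/* checks whoever is current on *this* CPU, not rq->curr */
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}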
@@ -1126,7 +1138,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
 	rt_rq = &rq->rt;
 
-	if (unlikely(!rt_rq->rt_nr_running))
+	if (!rt_rq->rt_nr_running)
 		return NULL;
 
 	if (rt_rq_throttled(rt_rq))
@@ -1239,6 +1251,10 @@ static int find_lowest_rq(struct task_struct *task)
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
+	/* Make sure the mask is initialized first */
+	if (unlikely(!lowest_mask))
+		return -1;
+
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
 
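
Note: lowest_mask is the per-CPU scratch cpumask handed to cpupri_find(); with CONFIG_CPUMASK_OFFSTACK it is only allocated when the RT sched class is initialised, so a very early RT wakeup can reach find_lowest_rq() before it exists, and the new check simply bails out. Roughly how the mask is declared and allocated elsewhere in this file (reproduced from memory, shown for context only, details may differ):

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static __init void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}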
@@ -1544,7 +1560,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
 
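
Note: the rt_task(prev) test could be dropped because pre_schedule_rt() is only reached through prev's own sched_class hook, so prev is an RT task by construction. The dispatch site in kernel/sched.c looked roughly like this (sketched from memory, for context only):

static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}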