about summary refs log tree commit diff stats
path: root/kernel/sched_rt.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 44b06d75416e..e4821593d4de 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)
12 12
13static inline void rt_set_overload(struct rq *rq) 13static inline void rt_set_overload(struct rq *rq)
14{ 14{
15 if (!rq->online)
16 return;
17
15 cpu_set(rq->cpu, rq->rd->rto_mask); 18 cpu_set(rq->cpu, rq->rd->rto_mask);
16 /* 19 /*
17 * Make sure the mask is visible before we set 20 * Make sure the mask is visible before we set
@@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)
26 29
27static inline void rt_clear_overload(struct rq *rq) 30static inline void rt_clear_overload(struct rq *rq)
28{ 31{
32 if (!rq->online)
33 return;
34
29 /* the order here really doesn't matter */ 35 /* the order here really doesn't matter */
30 atomic_dec(&rq->rd->rto_count); 36 atomic_dec(&rq->rd->rto_count);
31 cpu_clear(rq->cpu, rq->rd->rto_mask); 37 cpu_clear(rq->cpu, rq->rd->rto_mask);
@@ -394,7 +400,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
394 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 400 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
395 struct rq *rq = rq_of_rt_rq(rt_rq); 401 struct rq *rq = rq_of_rt_rq(rt_rq);
396 rt_rq->highest_prio = rt_se_prio(rt_se); 402 rt_rq->highest_prio = rt_se_prio(rt_se);
397 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se)); 403
404 if (rq->online)
405 cpupri_set(&rq->rd->cpupri, rq->cpu,
406 rt_se_prio(rt_se));
398 } 407 }
399#endif 408#endif
400#ifdef CONFIG_SMP 409#ifdef CONFIG_SMP
@@ -448,7 +457,10 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
448 457
449 if (rt_rq->highest_prio != highest_prio) { 458 if (rt_rq->highest_prio != highest_prio) {
450 struct rq *rq = rq_of_rt_rq(rt_rq); 459 struct rq *rq = rq_of_rt_rq(rt_rq);
451 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio); 460
461 if (rq->online)
462 cpupri_set(&rq->rd->cpupri, rq->cpu,
463 rt_rq->highest_prio);
452 } 464 }
453 465
454 update_rt_migration(rq_of_rt_rq(rt_rq)); 466 update_rt_migration(rq_of_rt_rq(rt_rq));
@@ -1154,7 +1166,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1154} 1166}
1155 1167
1156/* Assumes rq->lock is held */ 1168/* Assumes rq->lock is held */
1157static void join_domain_rt(struct rq *rq) 1169static void rq_online_rt(struct rq *rq)
1158{ 1170{
1159 if (rq->rt.overloaded) 1171 if (rq->rt.overloaded)
1160 rt_set_overload(rq); 1172 rt_set_overload(rq);
@@ -1163,7 +1175,7 @@ static void join_domain_rt(struct rq *rq)
1163} 1175}
1164 1176
1165/* Assumes rq->lock is held */ 1177/* Assumes rq->lock is held */
1166static void leave_domain_rt(struct rq *rq) 1178static void rq_offline_rt(struct rq *rq)
1167{ 1179{
1168 if (rq->rt.overloaded) 1180 if (rq->rt.overloaded)
1169 rt_clear_overload(rq); 1181 rt_clear_overload(rq);
@@ -1331,8 +1343,8 @@ static const struct sched_class rt_sched_class = {
1331 .load_balance = load_balance_rt, 1343 .load_balance = load_balance_rt,
1332 .move_one_task = move_one_task_rt, 1344 .move_one_task = move_one_task_rt,
1333 .set_cpus_allowed = set_cpus_allowed_rt, 1345 .set_cpus_allowed = set_cpus_allowed_rt,
1334 .join_domain = join_domain_rt, 1346 .rq_online = rq_online_rt,
1335 .leave_domain = leave_domain_rt, 1347 .rq_offline = rq_offline_rt,
1336 .pre_schedule = pre_schedule_rt, 1348 .pre_schedule = pre_schedule_rt,
1337 .post_schedule = post_schedule_rt, 1349 .post_schedule = post_schedule_rt,
1338 .task_wake_up = task_wake_up_rt, 1350 .task_wake_up = task_wake_up_rt,