path: root/kernel/sched_rt.c
author    Gregory Haskins <ghaskins@novell.com>    2008-06-04 15:04:05 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-06-06 09:19:42 -0400
commit    1f11eb6a8bc92536d9e93ead48fa3ffbd1478571 (patch)
tree      40a123bd566bab8ddc726303e2725bae55b1a499 /kernel/sched_rt.c
parent    099f98c8a1f13501a98afbfff4756395a610581c (diff)
sched: fix cpupri hotplug support
The RT folks over at Red Hat found an issue w.r.t. hotplug support which was
traced to problems with the cpupri infrastructure in the scheduler:

  https://bugzilla.redhat.com/show_bug.cgi?id=449676

This bug affects 23-rt12+, 24-rtX, 25-rtX, and sched-devel. This patch applies
to 25.4-rt4, though it should trivially apply to most of the cpupri-enabled
kernels mentioned above.

It turned out that offline cpus could get inadvertently registered with
cpupri, so that they were erroneously selected during migration decisions.
The end result would be an OOPS as the offline cpu had tasks routed to it.

This patch generalizes the old join/leave domain interface into an
online/offline interface, and adjusts the root-domain/hotplug code to
utilize it.

I was able to easily reproduce the issue prior to this patch, and am no longer
able to reproduce it after this patch. I can offline cpus indefinitely and
everything seems to be in working order.

Thanks to Arnaldo (acme), Thomas, and Peter for doing the legwork to point me
in the right direction. Also thank you to Peter for reviewing the early
iterations of this patch.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
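The scheduler-core half of the change (the root-domain/hotplug code that flips
rq->online and drives the new hooks) lives in kernel/sched.c and is therefore
not visible in this sched_rt.c-limited diffstat. A minimal sketch of the
intended shape, assuming the for_each_class() iterator used by the scheduler
core of this era and the rq->online flag referenced in the hunks below
(function names here are illustrative, not taken from this diff):

/*
 * Sketch only -- not part of this diff.  Called with rq->lock held from
 * the root-domain attach/detach and cpu hotplug paths.
 */
static void set_rq_online(struct rq *rq)
{
        if (!rq->online) {
                const struct sched_class *class;

                rq->online = 1;
                for_each_class(class) {
                        if (class->rq_online)
                                class->rq_online(rq);   /* e.g. rq_online_rt() */
                }
        }
}

static void set_rq_offline(struct rq *rq)
{
        if (rq->online) {
                const struct sched_class *class;

                for_each_class(class) {
                        if (class->rq_offline)
                                class->rq_offline(rq);  /* e.g. rq_offline_rt() */
                }
                rq->online = 0;
        }
}

With rq->online cleared before a cpu leaves the root domain, the
rt_set_overload()/cpupri_set() calls in the hunks below degrade to no-ops on a
dead runqueue, so migration decisions can no longer pick it.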
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 44b06d75416e..e4821593d4de 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)
 
 static inline void rt_set_overload(struct rq *rq)
 {
+        if (!rq->online)
+                return;
+
         cpu_set(rq->cpu, rq->rd->rto_mask);
         /*
          * Make sure the mask is visible before we set
@@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)
 
 static inline void rt_clear_overload(struct rq *rq)
 {
+        if (!rq->online)
+                return;
+
         /* the order here really doesn't matter */
         atomic_dec(&rq->rd->rto_count);
         cpu_clear(rq->cpu, rq->rd->rto_mask);
@@ -394,7 +400,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
                 struct rq *rq = rq_of_rt_rq(rt_rq);
                 rt_rq->highest_prio = rt_se_prio(rt_se);
-                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));
+
+                if (rq->online)
+                        cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                   rt_se_prio(rt_se));
         }
 #endif
 #ifdef CONFIG_SMP
@@ -448,7 +457,10 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
         if (rt_rq->highest_prio != highest_prio) {
                 struct rq *rq = rq_of_rt_rq(rt_rq);
-                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+
+                if (rq->online)
+                        cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                   rt_rq->highest_prio);
         }
 
         update_rt_migration(rq_of_rt_rq(rt_rq));
@@ -1154,7 +1166,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
         if (rq->rt.overloaded)
                 rt_set_overload(rq);
@@ -1163,7 +1175,7 @@ static void join_domain_rt(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
         if (rq->rt.overloaded)
                 rt_clear_overload(rq);
@@ -1331,8 +1343,8 @@ static const struct sched_class rt_sched_class = {
         .load_balance = load_balance_rt,
         .move_one_task = move_one_task_rt,
         .set_cpus_allowed = set_cpus_allowed_rt,
-        .join_domain = join_domain_rt,
-        .leave_domain = leave_domain_rt,
+        .rq_online = rq_online_rt,
+        .rq_offline = rq_offline_rt,
         .pre_schedule = pre_schedule_rt,
         .post_schedule = post_schedule_rt,
         .task_wake_up = task_wake_up_rt,
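For context on the failure mode described in the commit message (an offline
cpu being "erroneously selected during migration decisions"), here is a
simplified sketch of the cpupri consumer side in sched_rt.c. It assumes the
cpupri_find() helper of this patch series; the real find_lowest_rq() also
filters by cache affinity and uses a per-cpu scratch mask, so this is only
meant to show why a stale (offline) cpu left in cpupri can end up as a push
target:

/*
 * Simplified sketch -- not part of this diff.  If an offline cpu is still
 * registered in cpupri, it can be returned here and a task gets pushed to
 * its dead runqueue, which is the OOPS this patch fixes.
 */
static int find_lowest_rq_sketch(struct task_struct *task)
{
        cpumask_t lowest_mask;

        if (task->rt.nr_cpus_allowed == 1)
                return -1;              /* nowhere else to push this task */

        /* Ask cpupri for the cpus currently running lower-priority work. */
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, &lowest_mask))
                return -1;              /* no suitable target found */

        if (cpu_isset(task_cpu(task), lowest_mask))
                return task_cpu(task);  /* prefer the task's current cpu */

        return first_cpu(lowest_mask);  /* otherwise take any candidate */
}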