author    Rusty Russell <rusty@rustcorp.com.au>      2008-11-24 11:05:04 -0500
committer Ingo Molnar <mingo@elte.hu>                2008-11-24 11:50:45 -0500
commit    758b2cdc6f6a22c702bd8f2344382fb1270b2161 (patch)
tree      270aec3d0f6235c1519c16e8dc8148f195e133db /kernel/sched_rt.c
parent    1e5ce4f4a755ee498bd9217dae26143afa0d8f31 (diff)
sched: wrap sched_group and sched_domain cpumask accesses.
Impact: trivial wrap of member accesses

This eases the transition in the next patch. We also get rid of a temporary
cpumask in find_idlest_cpu() thanks to for_each_cpu_and, and in
sched_balance_self() by reading the weight before setting sd to NULL.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
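For illustration, the cleanup the message refers to in find_idlest_cpu() looks roughly like the sketch below. The function names (find_cheapest_cpu_old/new) and the elided pick-best logic are placeholders, not the kernel's code; the point is only that for_each_cpu_and() walks the intersection of two masks directly, so the on-stack temporary built with cpus_and() can go away.

    /*
     * Illustrative sketch only -- names are hypothetical, not the kernel's.
     */
    #include <linux/cpumask.h>

    /* Before: build the intersection into an on-stack temporary, then walk it. */
    static int find_cheapest_cpu_old(const cpumask_t *span,
    				 const cpumask_t *allowed)
    {
    	cpumask_t tmp;
    	int cpu, best = -1;

    	cpus_and(tmp, *span, *allowed);
    	for_each_cpu_mask(cpu, tmp)
    		best = cpu;		/* pick-best logic elided */
    	return best;
    }

    /* After: for_each_cpu_and() iterates the intersection with no temporary. */
    static int find_cheapest_cpu_new(const struct cpumask *span,
    				 const struct cpumask *allowed)
    {
    	int cpu, best = -1;

    	for_each_cpu_and(cpu, span, allowed)
    		best = cpu;		/* pick-best logic elided */
    	return best;
    }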
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--    kernel/sched_rt.c    3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2bdd44423599..4cd813abc23a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1017,7 +1017,8 @@ static int find_lowest_rq(struct task_struct *task)
 			cpumask_t domain_mask;
 			int best_cpu;
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+			cpumask_and(&domain_mask, sched_domain_span(sd),
+				    lowest_mask);
 
 			best_cpu = pick_optimal_cpu(this_cpu,
 						    &domain_mask);
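For context, the accessor used in the hunk above is introduced by the same patch in include/linux/sched.h. A rough sketch of the idea, assuming sched_domain::span and sched_group::cpumask are still plain cpumask_t members at this point in the series (the exact kernel definitions may differ in detail):

    /* Sketch only; the real definitions live in include/linux/sched.h. */
    static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
    {
    	return &sd->span;	/* still a cpumask_t member here */
    }

    static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
    {
    	return &sg->cpumask;	/* same wrapping trick for groups */
    }

With callers funneled through these accessors, the next patch can change how the masks are stored without touching call sites such as find_lowest_rq() above.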