about summary refs log tree commit diff stats
path: root/kernel/sched_rt.c
diff options
context:
space:
mode:
authorMike Travis <travis@sgi.com>2008-05-12 15:21:13 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-23 12:35:12 -0400
commit363ab6f1424cdea63e5d182312d60e19077b892a (patch)
treee200197412691015ca8de083155985e7e460ecfc /kernel/sched_rt.c
parent068b12772a64c2440ef2f64ac5d780688c06576f (diff)
core: use performance variant for_each_cpu_mask_nr
Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate.
Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--kernel/sched_rt.c6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 060e87b0cb1c..d73386c6e361 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
231 return 1; 231 return 1;
232 232
233 span = sched_rt_period_mask(); 233 span = sched_rt_period_mask();
234 for_each_cpu_mask(i, span) { 234 for_each_cpu_mask_nr(i, span) {
235 int enqueue = 0; 235 int enqueue = 0;
236 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 236 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
237 struct rq *rq = rq_of_rt_rq(rt_rq); 237 struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -272,7 +272,7 @@ static int balance_runtime(struct rt_rq *rt_rq)
272 272
273 spin_lock(&rt_b->rt_runtime_lock); 273 spin_lock(&rt_b->rt_runtime_lock);
274 rt_period = ktime_to_ns(rt_b->rt_period); 274 rt_period = ktime_to_ns(rt_b->rt_period);
275 for_each_cpu_mask(i, rd->span) { 275 for_each_cpu_mask_nr(i, rd->span) {
276 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 276 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
277 s64 diff; 277 s64 diff;
278 278
@@ -1000,7 +1000,7 @@ static int pull_rt_task(struct rq *this_rq)
1000 1000
1001 next = pick_next_task_rt(this_rq); 1001 next = pick_next_task_rt(this_rq);
1002 1002
1003 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { 1003 for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1004 if (this_cpu == cpu) 1004 if (this_cpu == cpu)
1005 continue; 1005 continue;
1006 1006