author	Mike Travis <travis@sgi.com>	2008-05-12 15:21:13 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 12:39:06 -0400
commit	cad0e458d17c643c20c1d38f45a1d26125e6a622 (patch)
tree	65328a74709595ca5012eafc951227f90100f8e2	/kernel/time
parent	5d7bfd0c4d463d288422032c9903d0452dee141d (diff)
clocksource/events: use performance variant for_each_cpu_mask_nr
Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate.

Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
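The point of the _nr variants is that they stop scanning the cpumask at nr_cpu_ids (one past the highest possible CPU id on the running system) instead of at the compile-time NR_CPUS limit, which can be far larger (e.g. a kernel built with NR_CPUS=4096 booted on a 4-CPU machine). Below is a minimal stand-alone sketch of that bound change; it is user-space illustration code, not the kernel implementation, and the model_* helpers only approximate the semantics of the kernel's next_cpu()/next_cpu_nr():

	/* Stand-alone model of the old vs. new cpumask scan bound. */
	#include <stdio.h>

	#define NR_CPUS		4096			/* compile-time maximum */
	#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

	static int nr_cpu_ids = 4;			/* CPUs possible on this machine */
	static unsigned long cpu_online_map[NR_CPUS / BITS_PER_LONG];

	static int cpu_bit_isset(int cpu)
	{
		return (cpu_online_map[cpu / BITS_PER_LONG] >>
			(cpu % BITS_PER_LONG)) & 1UL;
	}

	/* old bound: next_cpu() walks all NR_CPUS bits */
	static int model_next_cpu(int cpu)
	{
		for (cpu++; cpu < NR_CPUS; cpu++)
			if (cpu_bit_isset(cpu))
				return cpu;
		return NR_CPUS;
	}

	/* new bound: next_cpu_nr() stops at nr_cpu_ids */
	static int model_next_cpu_nr(int cpu)
	{
		for (cpu++; cpu < nr_cpu_ids; cpu++)
			if (cpu_bit_isset(cpu))
				return cpu;
		return nr_cpu_ids;
	}

	int main(void)
	{
		int cpu;

		cpu_online_map[0] = 0xfUL;		/* CPUs 0-3 online */

		/* rough shape of for_each_cpu_mask_nr(): bounded by nr_cpu_ids,
		 * so the empty tail of 4092 unused bits is never walked */
		for (cpu = model_next_cpu_nr(-1); cpu < nr_cpu_ids;
		     cpu = model_next_cpu_nr(cpu))
			printf("cpu %d is online\n", cpu);

		return 0;
	}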
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/clocksource.c	4
-rw-r--r--	kernel/time/tick-broadcast.c	3
2 files changed, 3 insertions, 4 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index dadde5361f32..60ceabd53f2e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
 		 * Cycle through CPUs to check if the CPUs stay
 		 * synchronized to each other.
 		 */
-		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+		int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-		if (next_cpu >= NR_CPUS)
+		if (next_cpu >= nr_cpu_ids)
 			next_cpu = first_cpu(cpu_online_map);
 		watchdog_timer.expires += WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer, next_cpu);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 57a1f02e5ec0..2d0a96346259 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -397,8 +397,7 @@ again:
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
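For reference, the open-coded loop removed from tick-broadcast.c and the macro that replaces it walk the same mask; a simplified sketch of what for_each_cpu_mask_nr(cpu, mask) does (not the exact 2.6.26 macro expansion):

	for (cpu = first_cpu(mask); cpu < nr_cpu_ids;
	     cpu = next_cpu_nr(cpu, mask)) {
		/* body runs once per CPU set in mask */
	}

The only functional change from the old loop is the nr_cpu_ids bound in place of NR_CPUS.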