diff options
author | Mike Travis <travis@sgi.com> | 2008-05-12 15:21:13 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-23 12:35:12 -0400 |
commit | 363ab6f1424cdea63e5d182312d60e19077b892a (patch) | |
tree | e200197412691015ca8de083155985e7e460ecfc /kernel/rcupreempt.c | |
parent | 068b12772a64c2440ef2f64ac5d780688c06576f (diff) |
core: use performance variant for_each_cpu_mask_nr
Change references from for_each_cpu_mask to for_each_cpu_mask_nr
where appropriate.
Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcupreempt.c')
-rw-r--r-- | kernel/rcupreempt.c | 10 |
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index e1cdf196a515..18af270125cf 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
@@ -657,7 +657,7 @@ rcu_try_flip_idle(void) | |||
657 | 657 | ||
658 | /* Now ask each CPU for acknowledgement of the flip. */ | 658 | /* Now ask each CPU for acknowledgement of the flip. */ |
659 | 659 | ||
660 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 660 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
661 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 661 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
662 | dyntick_save_progress_counter(cpu); | 662 | dyntick_save_progress_counter(cpu); |
663 | } | 663 | } |
@@ -675,7 +675,7 @@ rcu_try_flip_waitack(void) | |||
675 | int cpu; | 675 | int cpu; |
676 | 676 | ||
677 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 677 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
678 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 678 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
679 | if (rcu_try_flip_waitack_needed(cpu) && | 679 | if (rcu_try_flip_waitack_needed(cpu) && |
680 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 680 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { |
681 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 681 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
@@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void) | |||
707 | /* Check to see if the sum of the "last" counters is zero. */ | 707 | /* Check to see if the sum of the "last" counters is zero. */ |
708 | 708 | ||
709 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | 709 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); |
710 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 710 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
711 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | 711 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; |
712 | if (sum != 0) { | 712 | if (sum != 0) { |
713 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | 713 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); |
@@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void) | |||
722 | smp_mb(); /* ^^^^^^^^^^^^ */ | 722 | smp_mb(); /* ^^^^^^^^^^^^ */ |
723 | 723 | ||
724 | /* Call for a memory barrier from each CPU. */ | 724 | /* Call for a memory barrier from each CPU. */ |
725 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 725 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
726 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 726 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
727 | dyntick_save_progress_counter(cpu); | 727 | dyntick_save_progress_counter(cpu); |
728 | } | 728 | } |
@@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void) | |||
742 | int cpu; | 742 | int cpu; |
743 | 743 | ||
744 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 744 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
745 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 745 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
746 | if (rcu_try_flip_waitmb_needed(cpu) && | 746 | if (rcu_try_flip_waitmb_needed(cpu) && |
747 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 747 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { |
748 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 748 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |